Diffstat (limited to 'libs')
-rw-r--r--  libs/libevent/docs/.gitignore | 135
-rw-r--r--  libs/libevent/docs/.travis.yml | 21
-rw-r--r--  libs/libevent/docs/CMakeLists.txt | 1428
-rw-r--r--  libs/libevent/docs/ChangeLog | 1402
-rw-r--r--  libs/libevent/docs/ChangeLog-1.4 | 231
-rw-r--r--  libs/libevent/docs/ChangeLog-2.0 | 1280
-rw-r--r--  libs/libevent/docs/Doxyfile | 257
-rw-r--r--  libs/libevent/docs/LICENSE | 99
-rw-r--r--  libs/libevent/docs/Makefile.am | 303
-rw-r--r--  libs/libevent/docs/Makefile.nmake | 82
-rw-r--r--  libs/libevent/docs/README.md | 397
-rw-r--r--  libs/libevent/docs/appveyor.yml | 45
-rw-r--r--  libs/libevent/docs/autogen.sh | 15
-rw-r--r--  libs/libevent/docs/cmake/AddCompilerFlags.cmake | 15
-rw-r--r--  libs/libevent/docs/cmake/COPYING-CMAKE-SCRIPTS | 22
-rw-r--r--  libs/libevent/docs/cmake/CheckFileOffsetBits.c | 14
-rw-r--r--  libs/libevent/docs/cmake/CheckFileOffsetBits.cmake | 43
-rw-r--r--  libs/libevent/docs/cmake/CheckFunctionExistsEx.c | 30
-rw-r--r--  libs/libevent/docs/cmake/CheckFunctionExistsEx.cmake | 69
-rw-r--r--  libs/libevent/docs/cmake/CheckFunctionKeywords.cmake | 14
-rw-r--r--  libs/libevent/docs/cmake/CheckPrototypeDefinition.c.in | 29
-rw-r--r--  libs/libevent/docs/cmake/CheckPrototypeDefinition.cmake | 84
-rw-r--r--  libs/libevent/docs/cmake/CheckWorkingKqueue.cmake | 52
-rw-r--r--  libs/libevent/docs/cmake/CodeCoverage.cmake | 162
-rw-r--r--  libs/libevent/docs/cmake/Copyright.txt | 57
-rw-r--r--  libs/libevent/docs/cmake/FindGit.cmake | 45
-rw-r--r--  libs/libevent/docs/cmake/LibeventConfig.cmake.in | 17
-rw-r--r--  libs/libevent/docs/cmake/LibeventConfigBuildTree.cmake.in | 17
-rw-r--r--  libs/libevent/docs/cmake/LibeventConfigVersion.cmake.in | 11
-rw-r--r--  libs/libevent/docs/cmake/VersionViaGit.cmake | 53
-rw-r--r--  libs/libevent/docs/configure.ac | 919
-rw-r--r--  libs/libevent/docs/devpoll.c | 311
-rw-r--r--  libs/libevent/docs/epoll.c | 540
-rw-r--r--  libs/libevent/docs/epoll_sub.c | 66
-rw-r--r--  libs/libevent/docs/evconfig-private.h.cmake | 35
-rw-r--r--  libs/libevent/docs/evconfig-private.h.in | 48
-rw-r--r--  libs/libevent/docs/evdns.3 | 322
-rw-r--r--  libs/libevent/docs/event-config.h.cmake | 534
-rw-r--r--  libs/libevent/docs/event.3 | 624
-rw-r--r--  libs/libevent/docs/event_rpcgen.py | 1728
-rw-r--r--  libs/libevent/docs/evport.c | 451
-rw-r--r--  libs/libevent/docs/evthread_pthread.c | 191
-rw-r--r--  libs/libevent/docs/kqueue-internal.h | 39
-rw-r--r--  libs/libevent/docs/kqueue.c | 567
-rw-r--r--  libs/libevent/docs/libevent.pc.in | 16
-rw-r--r--  libs/libevent/docs/libevent_openssl.pc.in | 16
-rw-r--r--  libs/libevent/docs/libevent_pthreads.pc.in | 16
-rw-r--r--  libs/libevent/docs/m4/ac_backport_259_ssizet.m4 | 3
-rw-r--r--  libs/libevent/docs/m4/acx_pthread.m4 | 279
-rw-r--r--  libs/libevent/docs/m4/libevent_openssl.m4 | 47
-rw-r--r--  libs/libevent/docs/m4/ntp_pkg_config.m4 | 27
-rw-r--r--  libs/libevent/docs/make-event-config.sed | 23
-rw-r--r--  libs/libevent/docs/make_epoll_table.py | 63
-rw-r--r--  libs/libevent/docs/poll.c | 341
-rw-r--r--  libs/libevent/docs/sample/dns-example.c | 257
-rw-r--r--  libs/libevent/docs/sample/event-read-fifo.c | 162
-rw-r--r--  libs/libevent/docs/sample/hello-world.c | 141
-rw-r--r--  libs/libevent/docs/sample/hostcheck.c | 217
-rw-r--r--  libs/libevent/docs/sample/hostcheck.h | 30
-rw-r--r--  libs/libevent/docs/sample/http-connect.c | 119
-rw-r--r--  libs/libevent/docs/sample/http-server.c | 418
-rw-r--r--  libs/libevent/docs/sample/https-client.c | 494
-rw-r--r--  libs/libevent/docs/sample/include.am | 53
-rw-r--r--  libs/libevent/docs/sample/le-proxy.c | 288
-rw-r--r--  libs/libevent/docs/sample/openssl_hostname_validation.c | 173
-rw-r--r--  libs/libevent/docs/sample/openssl_hostname_validation.h | 56
-rw-r--r--  libs/libevent/docs/sample/signal-test.c | 75
-rw-r--r--  libs/libevent/docs/sample/time-test.c | 107
-rw-r--r--  libs/libevent/docs/select.c | 346
-rw-r--r--  libs/libevent/docs/test/Makefile.nmake | 79
-rw-r--r--  libs/libevent/docs/test/bench.c | 207
-rw-r--r--  libs/libevent/docs/test/bench_cascade.c | 188
-rw-r--r--  libs/libevent/docs/test/bench_http.c | 195
-rw-r--r--  libs/libevent/docs/test/bench_httpclient.c | 239
-rw-r--r--  libs/libevent/docs/test/check-dumpevents.py | 54
-rw-r--r--  libs/libevent/docs/test/include.am | 146
-rw-r--r--  libs/libevent/docs/test/print-winsock-errors.c | 84
-rw-r--r--  libs/libevent/docs/test/regress.c | 3401
-rw-r--r--  libs/libevent/docs/test/regress.h | 144
-rw-r--r--  libs/libevent/docs/test/regress.rpc | 25
-rw-r--r--  libs/libevent/docs/test/regress_buffer.c | 2281
-rw-r--r--  libs/libevent/docs/test/regress_bufferevent.c | 1284
-rw-r--r--  libs/libevent/docs/test/regress_dns.c | 2151
-rw-r--r--  libs/libevent/docs/test/regress_et.c | 208
-rw-r--r--  libs/libevent/docs/test/regress_finalize.c | 347
-rw-r--r--  libs/libevent/docs/test/regress_http.c | 4335
-rw-r--r--  libs/libevent/docs/test/regress_iocp.c | 352
-rw-r--r--  libs/libevent/docs/test/regress_listener.c | 214
-rw-r--r--  libs/libevent/docs/test/regress_main.c | 468
-rw-r--r--  libs/libevent/docs/test/regress_minheap.c | 99
-rw-r--r--  libs/libevent/docs/test/regress_rpc.c | 905
-rw-r--r--  libs/libevent/docs/test/regress_ssl.c | 781
-rw-r--r--  libs/libevent/docs/test/regress_testutils.c | 229
-rw-r--r--  libs/libevent/docs/test/regress_testutils.h | 67
-rw-r--r--  libs/libevent/docs/test/regress_thread.c | 590
-rw-r--r--  libs/libevent/docs/test/regress_thread.h | 48
-rw-r--r--  libs/libevent/docs/test/regress_util.c | 1413
-rw-r--r--  libs/libevent/docs/test/regress_zlib.c | 348
-rw-r--r--  libs/libevent/docs/test/rpcgen_wrapper.sh | 52
-rw-r--r--  libs/libevent/docs/test/test-changelist.c | 224
-rw-r--r--  libs/libevent/docs/test/test-closed.c | 117
-rw-r--r--  libs/libevent/docs/test/test-dumpevents.c | 179
-rw-r--r--  libs/libevent/docs/test/test-eof.c | 124
-rw-r--r--  libs/libevent/docs/test/test-fdleak.c | 249
-rw-r--r--  libs/libevent/docs/test/test-init.c | 65
-rw-r--r--  libs/libevent/docs/test/test-ratelim.c | 601
-rw-r--r--  libs/libevent/docs/test/test-ratelim.sh | 88
-rw-r--r--  libs/libevent/docs/test/test-time.c | 116
-rw-r--r--  libs/libevent/docs/test/test-weof.c | 117
-rw-r--r--  libs/libevent/docs/test/test.sh | 160
-rw-r--r--  libs/libevent/docs/test/tinytest.c | 493
-rw-r--r--  libs/libevent/docs/test/tinytest.h | 100
-rw-r--r--  libs/libevent/docs/test/tinytest_demo.c | 262
-rw-r--r--  libs/libevent/docs/test/tinytest_local.h | 12
-rw-r--r--  libs/libevent/docs/test/tinytest_macros.h | 199
-rw-r--r--  libs/libevent/docs/whatsnew-2.0.txt | 609
-rw-r--r--  libs/libevent/docs/whatsnew-2.1.txt | 690
-rw-r--r--  libs/libevent/include/evconfig-private.h | 35
-rw-r--r--  libs/libevent/include/evdns.h | 45
-rw-r--r--  libs/libevent/include/event.h | 83
-rw-r--r--  libs/libevent/include/event2/buffer.h | 1076
-rw-r--r--  libs/libevent/include/event2/buffer_compat.h | 115
-rw-r--r--  libs/libevent/include/event2/bufferevent.h | 1021
-rw-r--r--  libs/libevent/include/event2/bufferevent_compat.h | 100
-rw-r--r--  libs/libevent/include/event2/bufferevent_ssl.h | 134
-rw-r--r--  libs/libevent/include/event2/bufferevent_struct.h | 116
-rw-r--r--  libs/libevent/include/event2/dns.h | 717
-rw-r--r--  libs/libevent/include/event2/dns_compat.h | 336
-rw-r--r--  libs/libevent/include/event2/dns_struct.h | 80
-rw-r--r--  libs/libevent/include/event2/event-config.h | 534
-rw-r--r--  libs/libevent/include/event2/event.h | 1675
-rw-r--r--  libs/libevent/include/event2/event_compat.h | 230
-rw-r--r--  libs/libevent/include/event2/event_struct.h | 180
-rw-r--r--  libs/libevent/include/event2/http.h | 1170
-rw-r--r--  libs/libevent/include/event2/http_compat.h | 90
-rw-r--r--  libs/libevent/include/event2/http_struct.h | 152
-rw-r--r--  libs/libevent/include/event2/keyvalq_struct.h | 80
-rw-r--r--  libs/libevent/include/event2/listener.h | 180
-rw-r--r--  libs/libevent/include/event2/rpc.h | 596
-rw-r--r--  libs/libevent/include/event2/rpc_compat.h | 61
-rw-r--r--  libs/libevent/include/event2/rpc_struct.h | 100
-rw-r--r--  libs/libevent/include/event2/tag.h | 146
-rw-r--r--  libs/libevent/include/event2/tag_compat.h | 49
-rw-r--r--  libs/libevent/include/event2/thread.h | 253
-rw-r--r--  libs/libevent/include/event2/util.h | 852
-rw-r--r--  libs/libevent/include/event2/visibility.h | 50
-rw-r--r--  libs/libevent/include/evhttp.h | 45
-rw-r--r--  libs/libevent/include/evrpc.h | 45
-rw-r--r--  libs/libevent/include/evutil.h | 39
-rw-r--r--  libs/libevent/include/include.am | 46
-rw-r--r--  libs/libevent/libevent_12.vcxproj | 212
-rw-r--r--  libs/libevent/libevent_12.vcxproj.filters | 264
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt.c | 149
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt.h | 33
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt_long.c | 233
-rw-r--r--  libs/libevent/src/WIN32-Code/nmake/evconfig-private.h | 6
-rw-r--r--  libs/libevent/src/WIN32-Code/nmake/event2/event-config.h | 360
-rw-r--r--  libs/libevent/src/WIN32-Code/tree.h | 677
-rw-r--r--  libs/libevent/src/arc4random.c | 556
-rw-r--r--  libs/libevent/src/buffer.c | 3439
-rw-r--r--  libs/libevent/src/buffer_iocp.c | 326
-rw-r--r--  libs/libevent/src/bufferevent-internal.h | 480
-rw-r--r--  libs/libevent/src/bufferevent.c | 1016
-rw-r--r--  libs/libevent/src/bufferevent_async.c | 686
-rw-r--r--  libs/libevent/src/bufferevent_filter.c | 555
-rw-r--r--  libs/libevent/src/bufferevent_openssl.c | 1484
-rw-r--r--  libs/libevent/src/bufferevent_pair.c | 360
-rw-r--r--  libs/libevent/src/bufferevent_ratelim.c | 1092
-rw-r--r--  libs/libevent/src/bufferevent_sock.c | 707
-rw-r--r--  libs/libevent/src/changelist-internal.h | 102
-rw-r--r--  libs/libevent/src/compat/sys/queue.h | 488
-rw-r--r--  libs/libevent/src/defer-internal.h | 70
-rw-r--r--  libs/libevent/src/epolltable-internal.h | 1166
-rw-r--r--  libs/libevent/src/evbuffer-internal.h | 351
-rw-r--r--  libs/libevent/src/evdns.c | 4761
-rw-r--r--  libs/libevent/src/event-internal.h | 479
-rw-r--r--  libs/libevent/src/event.c | 3940
-rw-r--r--  libs/libevent/src/event_iocp.c | 294
-rw-r--r--  libs/libevent/src/event_tagging.c | 605
-rw-r--r--  libs/libevent/src/evmap-internal.h | 117
-rw-r--r--  libs/libevent/src/evmap.c | 1055
-rw-r--r--  libs/libevent/src/evrpc-internal.h | 205
-rw-r--r--  libs/libevent/src/evrpc.c | 1171
-rw-r--r--  libs/libevent/src/evsignal-internal.h | 65
-rw-r--r--  libs/libevent/src/evthread-internal.h | 392
-rw-r--r--  libs/libevent/src/evthread.c | 509
-rw-r--r--  libs/libevent/src/evthread_win32.c | 341
-rw-r--r--  libs/libevent/src/evutil.c | 2667
-rw-r--r--  libs/libevent/src/evutil_rand.c | 206
-rw-r--r--  libs/libevent/src/evutil_time.c | 538
-rw-r--r--  libs/libevent/src/ht-internal.h | 487
-rw-r--r--  libs/libevent/src/http-internal.h | 200
-rw-r--r--  libs/libevent/src/http.c | 4892
-rw-r--r--  libs/libevent/src/iocp-internal.h | 201
-rw-r--r--  libs/libevent/src/ipv6-internal.h | 83
-rw-r--r--  libs/libevent/src/listener.c | 889
-rw-r--r--  libs/libevent/src/log-internal.h | 83
-rw-r--r--  libs/libevent/src/log.c | 253
-rw-r--r--  libs/libevent/src/minheap-internal.h | 188
-rw-r--r--  libs/libevent/src/mm-internal.h | 87
-rw-r--r--  libs/libevent/src/ratelim-internal.h | 105
-rw-r--r--  libs/libevent/src/signal.c | 479
-rw-r--r--  libs/libevent/src/strlcpy-internal.h | 22
-rw-r--r--  libs/libevent/src/strlcpy.c | 75
-rw-r--r--  libs/libevent/src/time-internal.h | 98
-rw-r--r--  libs/libevent/src/util-internal.h | 485
-rw-r--r--  libs/libevent/src/win32select.c | 388
207 files changed, 93513 insertions, 0 deletions
diff --git a/libs/libevent/docs/.gitignore b/libs/libevent/docs/.gitignore
new file mode 100644
index 0000000000..01821fe1c5
--- /dev/null
+++ b/libs/libevent/docs/.gitignore
@@ -0,0 +1,135 @@
+### These files should get ignored no matter where they appear.
+
+# Editors leave these lying around
+\#*\#
+.#*
+*~
+*.swp
+
+# C stuff
+*.o
+
+# Windows stuff
+*.obj
+*.exe
+*.lib
+
+# Patch leaves these lying around
+*.orig
+*.rej
+
+# gcov stuff
+*.gcno
+*.gcov
+*.gcda
+
+# gdb stuff
+.gdb_history
+
+# Autotools stuff
+.deps
+.dirstamp
+Makefile
+Makefile.in
+
+# Libtool stuff
+.libs
+*.lo
+*.la
+
+# ctags stuff
+TAGS
+tags
+
+# Stuff made by our makefiles
+libevent.pc
+libevent_pthreads.pc
+libevent_openssl.pc
+*.log
+*.trs
+
+## The initial / makes these files only get ignored in particular directories.
+/autom4te.cache
+
+# Libtool adds these, at least sometimes
+/m4/libtool.m4
+/m4/ltoptions.m4
+/m4/ltsugar.m4
+/m4/ltversion.m4
+/m4/lt~obsolete.m4
+
+/aclocal.m4
+/compile
+/doxygen
+/config.cache
+/config.guess
+/config.log
+/config.status
+/config.sub
+/configure
+/configure.lineno
+/depcomp
+/config.h
+/config.h.in
+/install-sh
+/libtool
+/ltmain.sh
+/missing
+/stamp-h1
+/stamp-h2
+
+/include/event2/event-config.h
+/evconfig-private.h
+
+/sample/dns-example
+/sample/event-read-fifo
+/sample/hello-world
+/sample/http-server
+/sample/http-connect
+/sample/le-proxy
+/sample/https-client
+/sample/signal-test
+/sample/time-test
+/sample/event-test
+
+/test-driver
+/test/bench
+/test/bench_cascade
+/test/bench_http
+/test/bench_httpclient
+/test/regress
+/test/regress.gen.c
+/test/regress.gen.h
+/test/rpcgen-attempted
+/test/test-dumpevents
+/test/test-eof
+/test/test-closed
+/test/test-init
+/test/test-ratelim
+/test/test-script.sh
+/test/test-time
+/test/test-weof
+/test/test-changelist
+/test/test-fdleak
+
+
+# Files generated by cmake
+/CMakeCache.txt
+/CMakeFiles/
+/CTestTestfile.cmake
+/DartConfiguration.tcl
+/LibeventConfig.cmake
+/LibeventConfigVersion.cmake
+/LibeventTargets.cmake
+/bin/
+/cmake_install.cmake
+/include/evconfig-private.h
+/lib/
+/tmp/
+/verify_tests.sh
+/event.dir
+/event_core.dir
+/event_extra.dir
+*.vcxproj
+*.sln
+*.filters
diff --git a/libs/libevent/docs/.travis.yml b/libs/libevent/docs/.travis.yml
new file mode 100644
index 0000000000..f1d8141f5a
--- /dev/null
+++ b/libs/libevent/docs/.travis.yml
@@ -0,0 +1,21 @@
+env:
+ matrix:
+ - EVENT_BUILD_METHOD=cmake EVENT_CMAKE_OPTIONS=""
+ - EVENT_BUILD_METHOD=cmake EVENT_CMAKE_OPTIONS="-DEVENT__DISABLE_OPENSSL=ON"
+ - EVENT_BUILD_METHOD=cmake EVENT_CMAKE_OPTIONS="-DEVENT__DISABLE_THREAD_SUPPORT=ON"
+ - EVENT_BUILD_METHOD=cmake EVENT_CMAKE_OPTIONS="-DEVENT__DISABLE_DEBUG_MODE=ON"
+ - EVENT_BUILD_METHOD=autotools EVENT_CONFIGURE_OPTIONS=""
+ - EVENT_BUILD_METHOD=autotools EVENT_CONFIGURE_OPTIONS="--disable-debug-mode"
+language: c
+compiler:
+ - gcc
+ - clang
+install:
+ - sudo apt-get update -qq
+ - sudo apt-get install -y -qq zlib1g-dev libssl-dev build-essential automake autoconf cmake
+script:
+ - if [ "$EVENT_BUILD_METHOD" = "autotools" ]; then ./autogen.sh && ./configure $EVENT_CONFIGURE_OPTIONS && make && make verify; fi
+ - if [ "$EVENT_BUILD_METHOD" = "cmake" ]; then mkdir build && cd build && cmake .. $EVENT_CMAKE_OPTIONS && cmake --build . && CTEST_OUTPUT_ON_FAILURE=1 cmake --build . --target verify; fi
+
+notifications:
+ irc: "irc.oftc.net#libevent"
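
For orientation, the two script entries above reduce to a pair of local build recipes. The following is a minimal sketch that mirrors those CI steps on a Debian/Ubuntu-like host (the package list comes from the apt-get install line above; the optional flags in the comments are examples of what individual matrix entries pass):

    # Autotools job (EVENT_BUILD_METHOD=autotools)
    ./autogen.sh
    ./configure            # e.g. --disable-debug-mode for that matrix entry
    make
    make verify            # builds and runs the test suite

    # CMake job (EVENT_BUILD_METHOD=cmake)
    mkdir build && cd build
    cmake ..               # e.g. -DEVENT__DISABLE_OPENSSL=ON for that matrix entry
    cmake --build .
    CTEST_OUTPUT_ON_FAILURE=1 cmake --build . --target verify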
diff --git a/libs/libevent/docs/CMakeLists.txt b/libs/libevent/docs/CMakeLists.txt
new file mode 100644
index 0000000000..708f8a1bfa
--- /dev/null
+++ b/libs/libevent/docs/CMakeLists.txt
@@ -0,0 +1,1428 @@
+#
+# Libevent CMake project
+#
+# Based on initial work by:
+# Alexey Ozeritsky
+#
+# Additional changes:
+# Brodie Thiesfield
+# Joakim Soderberg
+# Trond Norbye
+# Sergei Nikulov
+#
+# Build example:
+#
+# cd libevent
+# md build
+# cd build
+# cmake -G "Visual Studio 10" ..
+# start libevent.sln
+#
+cmake_minimum_required(VERSION 2.8.0 FATAL_ERROR)
+
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE Release
+ CACHE STRING "Set build type to Debug o Release (default Release)" FORCE)
+endif()
+
+# get rid of the extra default configurations
+# what? why would you get rid of other useful build types? - Ellzey
+set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Limited configurations" FORCE)
+
+project(libevent C)
+
+set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/")
+
+
+include(CheckTypeSize)
+include(CheckFunctionExistsEx)
+include(CheckFileOffsetBits)
+include(CheckFunctionExists)
+include(CheckIncludeFile)
+include(CheckIncludeFiles)
+include(CheckVariableExists)
+include(CheckSymbolExists)
+include(CheckStructHasMember)
+include(CheckCSourceCompiles)
+include(CheckPrototypeDefinition)
+include(CheckFunctionKeywords)
+include(AddCompilerFlags)
+include(VersionViaGit)
+
+event_fuzzy_version_from_git()
+
+set(EVENT_VERSION_MAJOR ${EVENT_GIT___VERSION_MAJOR})
+set(EVENT_VERSION_MINOR ${EVENT_GIT___VERSION_MINOR})
+set(EVENT_VERSION_PATCH ${EVENT_GIT___VERSION_PATCH})
+set(EVENT_VERSION_STAGE ${EVENT_GIT___VERSION_STAGE})
+
+
+set(EVENT_ABI_MAJOR ${EVENT_VERSION_MAJOR})
+set(EVENT_ABI_MINOR ${EVENT_VERSION_MINOR})
+set(EVENT_ABI_PATCH ${EVENT_VERSION_PATCH})
+
+set(EVENT_ABI_LIBVERSION
+ "${EVENT_ABI_MAJOR}.${EVENT_ABI_MINOR}.${EVENT_ABI_PATCH}")
+
+set(EVENT_PACKAGE_VERSION
+ "${EVENT_VERSION_MAJOR}.${EVENT_VERSION_MINOR}.${EVENT_VERSION_PATCH}")
+
+set(EVENT_NUMERIC_VERSION 0x02010500)
+
+# only a subset of names can be used, defaults to "beta"
+set(EVENT_STAGE_NAME ${EVENT_VERSION_STAGE}
+ CACHE STRING "set the stage name (beta|alpha|release)")
+
+# a list that defines what can be set for EVENT_STAGE_NAME
+set(EVENT__ALLOWED_STAGE_NAMES
+ rc
+ beta
+ alpha
+ release)
+
+# attempt to find EVENT_STAGE_NAME in the allowed list
+# of accepted stage names; the return value is stored in
+# EVENT__STAGE_RET
+
+list(FIND EVENT__ALLOWED_STAGE_NAMES
+ ${EVENT_STAGE_NAME}
+ EVENT__STAGE_RET)
+
+if (EVENT__STAGE_RET EQUAL "-1")
+ set(EVENT_STAGE_NAME beta)
+endif()
+
+set(EVENT_VERSION
+ "${EVENT_VERSION_MAJOR}.${EVENT_VERSION_MINOR}.${EVENT_VERSION_PATCH}-${EVENT_STAGE_NAME}")
+
+option(EVENT__BUILD_SHARED_LIBRARIES
+ "Define if libevent should be built with shared libraries instead of archives" OFF)
+
+option(EVENT__DISABLE_DEBUG_MODE
+ "Define if libevent should build without support for a debug mode" OFF)
+
+option(EVENT__ENABLE_VERBOSE_DEBUG
+ "Enables verbose debugging" OFF)
+
+option(EVENT__DISABLE_MM_REPLACEMENT
+ "Define if libevent should not allow replacing the mm functions" OFF)
+
+option(EVENT__DISABLE_THREAD_SUPPORT
+ "Define if libevent should not be compiled with thread support" OFF)
+
+option(EVENT__DISABLE_OPENSSL
+ "Define if libevent should build without support for OpenSSL encrpytion" OFF)
+
+option(EVENT__DISABLE_BENCHMARK
+ "Defines if libevent should build without the benchmark exectuables" OFF)
+
+option(EVENT__DISABLE_TESTS
+ "If tests should be compiled or not" OFF)
+
+option(EVENT__DISABLE_REGRESS
+ "Disable the regress tests" OFF)
+
+option(EVENT__DISABLE_SAMPLES
+ "Disable sample files" OFF)
+
+option(EVENT__FORCE_KQUEUE_CHECK
+ "When crosscompiling forces running a test program that verifies that Kqueue works with pipes. Note that this requires you to manually run the test program on the the cross compilation target to verify that it works. See cmake documentation for try_run for more details" OFF)
+
+# TODO: Add --disable-largefile omit support for large files
+option(EVENT__COVERAGE
+"Enable running gcov to get a test coverage report (only works with GCC/CLang). Make sure to enable -DCMAKE_BUILD_TYPE=Debug as well." OFF)
+
+# Put the libraries and binaries that get built into directories at the
+# top of the build tree rather than in hard-to-find leaf directories.
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib)
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib)
+
+if (EVENT__ENABLE_VERBOSE_DEBUG)
+ add_definitions(-DUSE_DEBUG=1)
+endif()
+
+# Setup compiler flags for coverage.
+if (EVENT__COVERAGE)
+ if ((NOT CMAKE_COMPILER_IS_GNUCC) AND (NOT "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang"))
+ message(FATAL_ERROR "Trying to compile coverage support, but the compiler is neither GNU gcc nor Clang! Try CC=/usr/bin/gcc CXX=/usr/bin/g++ cmake <options> ..")
+ endif()
+
+ if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
+ message(FATAL_ERROR "Coverage requires -DCMAKE_BUILD_TYPE=Debug")
+ endif()
+
+ message(STATUS "Setting coverage compiler flags")
+ add_compiler_flags(-g -O0 -fprofile-arcs -ftest-coverage)
+endif()
+
+# GCC specific options.
+if (CMAKE_COMPILER_IS_GNUCC)
+ option(EVENT__DISABLE_GCC_WARNINGS "Disable verbose warnings with GCC" OFF)
+ option(EVENT__ENABLE_GCC_HARDENING "Enable compiler security checks" OFF)
+ option(EVENT__ENABLE_GCC_FUNCTION_SECTIONS "Enable gcc function sections" OFF)
+ option(EVENT__ENABLE_GCC_WARNINGS "Make all GCC warnings into errors" OFF)
+
+ list(APPEND __FLAGS -Wall)
+
+ if (EVENT__DISABLE_GCC_WARNINGS)
+ list(APPEND __FLAGS -w)
+ endif()
+
+ if (EVENT__ENABLE_GCC_HARDENING)
+ list(APPEND __FLAGS
+ -fstack-protector-all
+ -fwrapv
+ -fPIE
+ -Wstack-protector
+ "--param ssp-buffer-size=1")
+
+ add_definitions(-D_FORTIFY_SOURCE=2)
+ endif()
+
+ if (EVENT__ENABLE_GCC_FUNCTION_SECTIONS)
+ list(APPEND __FLAGS -ffunction-sections)
+ # TODO: Add --gc-sections support. We need some checks for NetBSD to ensure this works.
+ endif()
+
+ if (EVENT__ENABLE_GCC_WARNINGS)
+ list(APPEND __FLAGS -Werror)
+ endif()
+
+ # We need to test for at least gcc 2.95 here, because older versions don't
+ # have -fno-strict-aliasing
+ list(APPEND __FLAGS -fno-strict-aliasing)
+
+ add_compiler_flags(__FLAGS)
+endif()
+
+if (APPLE)
+ # Get rid of deprecated warnings for OpenSSL on OSX 10.7 and greater.
+ add_compiler_flags(
+ -Wno-error=deprecated-declarations
+ -Qunused-arguments)
+endif()
+
+# Winsock.
+if(WIN32)
+ set(CMAKE_EXTRA_INCLUDE_FILES winsock2.h ws2tcpip.h)
+ set(CMAKE_REQUIRED_LIBRARIES ws2_32.lib)
+ set(CMAKE_REQUIRED_DEFINITIONS -FIwinsock2.h -FIws2tcpip.h)
+endif()
+
+# Check if _GNU_SOURCE is available.
+CHECK_SYMBOL_EXISTS(__GNU_LIBRARY__ "features.h" _GNU_SOURCE)
+
+if (_GNU_SOURCE)
+ add_definitions(-D_GNU_SOURCE)
+endif()
+
+CHECK_INCLUDE_FILE(sys/types.h EVENT__HAVE_SYS_TYPES_H)
+if(EVENT__HAVE_SYS_TYPES_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/types.h)
+endif()
+
+CHECK_INCLUDE_FILE(sys/socket.h EVENT__HAVE_SYS_SOCKET_H)
+if(EVENT__HAVE_SYS_SOCKET_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/socket.h)
+endif()
+
+CHECK_INCLUDE_FILE(netinet/in.h EVENT__HAVE_NETINET_IN_H)
+if(EVENT__HAVE_NETINET_IN_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES netinet/in.h)
+endif()
+
+CHECK_INCLUDE_FILE(netinet/in6.h EVENT__HAVE_NETINET_IN6_H)
+if(EVENT__HAVE_NETINET_IN6_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES netinet/in6.h)
+endif()
+
+CHECK_INCLUDE_FILE(unistd.h EVENT__HAVE_UNISTD_H)
+CHECK_INCLUDE_FILE(netdb.h EVENT__HAVE_NETDB_H)
+CHECK_INCLUDE_FILE(dlfcn.h EVENT__HAVE_DLFCN_H)
+CHECK_INCLUDE_FILE(arpa/inet.h EVENT__HAVE_ARPA_INET_H)
+CHECK_INCLUDE_FILE(fcntl.h EVENT__HAVE_FCNTL_H)
+if(EVENT__HAVE_FCNTL_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES fcntl.h)
+endif()
+CHECK_INCLUDE_FILE(inttypes.h EVENT__HAVE_INTTYPES_H)
+CHECK_INCLUDE_FILE(memory.h EVENT__HAVE_MEMORY_H)
+CHECK_INCLUDE_FILE(poll.h EVENT__HAVE_POLL_H)
+CHECK_INCLUDE_FILE(port.h EVENT__HAVE_PORT_H)
+if(EVENT__HAVE_PORT_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES port.h)
+endif()
+CHECK_INCLUDE_FILE(signal.h EVENT__HAVE_SIGNAL_H)
+CHECK_INCLUDE_FILE(stdarg.h EVENT__HAVE_STDARG_H)
+CHECK_INCLUDE_FILE(stddef.h EVENT__HAVE_STDDEF_H)
+CHECK_INCLUDE_FILE(stdint.h EVENT__HAVE_STDINT_H)
+CHECK_INCLUDE_FILE(stdlib.h EVENT__HAVE_STDLIB_H)
+CHECK_INCLUDE_FILE(strings.h EVENT__HAVE_STRINGS_H)
+CHECK_INCLUDE_FILE(string.h EVENT__HAVE_STRING_H)
+CHECK_INCLUDE_FILE(sys/devpoll.h EVENT__HAVE_SYS_DEVPOLL_H)
+CHECK_INCLUDE_FILE(sys/epoll.h EVENT__HAVE_SYS_EPOLL_H)
+CHECK_INCLUDE_FILE(sys/eventfd.h EVENT__HAVE_SYS_EVENTFD_H)
+CHECK_INCLUDE_FILE(sys/event.h EVENT__HAVE_SYS_EVENT_H)
+CHECK_INCLUDE_FILE(sys/ioctl.h EVENT__HAVE_SYS_IOCTL_H)
+CHECK_INCLUDE_FILE(sys/mman.h EVENT__HAVE_SYS_MMAN_H)
+CHECK_INCLUDE_FILE(sys/param.h EVENT__HAVE_SYS_PARAM_H)
+CHECK_INCLUDE_FILE(sys/queue.h EVENT__HAVE_SYS_QUEUE_H)
+CHECK_INCLUDE_FILE(sys/select.h EVENT__HAVE_SYS_SELECT_H)
+CHECK_INCLUDE_FILE(sys/sendfile.h EVENT__HAVE_SYS_SENDFILE_H)
+CHECK_INCLUDE_FILE(sys/stat.h EVENT__HAVE_SYS_STAT_H)
+CHECK_INCLUDE_FILE(sys/time.h EVENT__HAVE_SYS_TIME_H)
+if(EVENT__HAVE_SYS_TIME_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES sys/time.h)
+endif()
+CHECK_INCLUDE_FILE(sys/uio.h EVENT__HAVE_SYS_UIO_H)
+CHECK_INCLUDE_FILES("sys/types.h;ifaddrs.h" EVENT__HAVE_IFADDRS_H)
+CHECK_INCLUDE_FILE(mach/mach_time.h EVENT__HAVE_MACH_MACH_TIME_H)
+CHECK_INCLUDE_FILE(netdb.h EVENT__HAVE_NETDB_H)
+CHECK_INCLUDE_FILE(netinet/tcp.h EVENT__HAVE_NETINET_TCP_H)
+CHECK_INCLUDE_FILE(sys/wait.h EVENT__HAVE_SYS_WAIT_H)
+CHECK_INCLUDE_FILE(sys/resource.h EVENT__HAVE_SYS_RESOURCE_H)
+CHECK_INCLUDE_FILE(sys/sysctl.h EVENT__HAVE_SYS_SYSCTL_H)
+CHECK_INCLUDE_FILE(sys/timerfd.h EVENT__HAVE_SYS_TIMERFD_H)
+
+
+CHECK_FUNCTION_EXISTS_EX(epoll_create EVENT__HAVE_EPOLL)
+CHECK_FUNCTION_EXISTS_EX(epoll_ctl EVENT__HAVE_EPOLL_CTL)
+CHECK_FUNCTION_EXISTS_EX(eventfd EVENT__HAVE_EVENTFD)
+CHECK_FUNCTION_EXISTS_EX(clock_gettime EVENT__HAVE_CLOCK_GETTIME)
+CHECK_FUNCTION_EXISTS_EX(fcntl EVENT__HAVE_FCNTL)
+CHECK_FUNCTION_EXISTS_EX(getaddrinfo EVENT__HAVE_GETADDRINFO)
+CHECK_FUNCTION_EXISTS_EX(getnameinfo EVENT__HAVE_GETNAMEINFO)
+CHECK_FUNCTION_EXISTS_EX(gettimeofday EVENT__HAVE_GETTIMEOFDAY)
+CHECK_FUNCTION_EXISTS_EX(getprotobynumber EVENT__HAVE_GETPROTOBYNUMBER)
+CHECK_FUNCTION_EXISTS_EX(getservbyname EVENT__HAVE_GETSERVBYNAME)
+CHECK_FUNCTION_EXISTS_EX(inet_ntop EVENT__HAVE_INET_NTOP)
+CHECK_FUNCTION_EXISTS_EX(inet_pton EVENT__HAVE_INET_PTON)
+CHECK_FUNCTION_EXISTS_EX(kqueue EVENT__HAVE_KQUEUE)
+CHECK_FUNCTION_EXISTS_EX(mmap EVENT__HAVE_MMAP)
+CHECK_FUNCTION_EXISTS_EX(pipe EVENT__HAVE_PIPE)
+CHECK_FUNCTION_EXISTS_EX(pipe2 EVENT__HAVE_PIPE2)
+CHECK_FUNCTION_EXISTS_EX(poll EVENT__HAVE_POLL)
+CHECK_FUNCTION_EXISTS_EX(port_create EVENT__HAVE_PORT_CREATE)
+CHECK_FUNCTION_EXISTS_EX(sendfile EVENT__HAVE_SENDFILE)
+CHECK_FUNCTION_EXISTS_EX(sigaction EVENT__HAVE_SIGACTION)
+CHECK_FUNCTION_EXISTS_EX(signal EVENT__HAVE_SIGNAL)
+CHECK_FUNCTION_EXISTS_EX(splice EVENT__HAVE_SPLICE)
+CHECK_FUNCTION_EXISTS_EX(strlcpy EVENT__HAVE_STRLCPY)
+CHECK_FUNCTION_EXISTS_EX(strsep EVENT__HAVE_STRSEP)
+CHECK_FUNCTION_EXISTS_EX(strtok_r EVENT__HAVE_STRTOK_R)
+CHECK_FUNCTION_EXISTS_EX(strtoll EVENT__HAVE_STRTOLL)
+CHECK_FUNCTION_EXISTS_EX(vasprintf EVENT__HAVE_VASPRINTF)
+CHECK_FUNCTION_EXISTS_EX(sysctl EVENT__HAVE_SYSCTL)
+CHECK_FUNCTION_EXISTS_EX(accept4 EVENT__HAVE_ACCEPT4)
+CHECK_FUNCTION_EXISTS_EX(arc4random EVENT__HAVE_ARC4RANDOM)
+CHECK_FUNCTION_EXISTS_EX(arc4random_buf EVENT__HAVE_ARC4RANDOM_BUF)
+CHECK_FUNCTION_EXISTS_EX(epoll_create1 EVENT__HAVE_EPOLL_CREATE1)
+CHECK_FUNCTION_EXISTS_EX(getegid EVENT__HAVE_GETEGID)
+CHECK_FUNCTION_EXISTS_EX(geteuid EVENT__HAVE_GETEUID)
+CHECK_FUNCTION_EXISTS_EX(getifaddrs EVENT__HAVE_GETIFADDRS)
+CHECK_FUNCTION_EXISTS_EX(issetugid EVENT__HAVE_ISSETUGID)
+CHECK_FUNCTION_EXISTS_EX(mach_absolute_time EVENT__HAVE_MACH_ABSOLUTE_TIME)
+CHECK_FUNCTION_EXISTS_EX(nanosleep EVENT__HAVE_NANOSLEEP)
+CHECK_FUNCTION_EXISTS_EX(usleep EVENT__HAVE_USLEEP)
+CHECK_FUNCTION_EXISTS_EX(timeradd EVENT__HAVE_TIMERADD)
+CHECK_FUNCTION_EXISTS_EX(timerclear EVENT__HAVE_TIMERCLEAR)
+CHECK_FUNCTION_EXISTS_EX(timercmp EVENT__HAVE_TIMERCMP)
+CHECK_FUNCTION_EXISTS_EX(timerfd_create HAVE_TIMERFD_CREATE)
+CHECK_FUNCTION_EXISTS_EX(timerisset EVENT__HAVE_TIMERISSET)
+CHECK_FUNCTION_EXISTS_EX(putenv EVENT__HAVE_PUTENV)
+CHECK_FUNCTION_EXISTS_EX(setenv EVENT__HAVE_SETENV)
+CHECK_FUNCTION_EXISTS_EX(setrlimit EVENT__HAVE_SETRLIMIT)
+CHECK_FUNCTION_EXISTS_EX(umask EVENT__HAVE_UMASK)
+CHECK_FUNCTION_EXISTS_EX(unsetenv EVENT__HAVE_UNSETENV)
+
+# Get the gethostbyname_r prototype.
+CHECK_FUNCTION_EXISTS_EX(gethostbyname_r EVENT__HAVE_GETHOSTBYNAME_R)
+
+if(EVENT__HAVE_GETHOSTBYNAME_R)
+ CHECK_PROTOTYPE_DEFINITION(gethostbyname_r
+ "int gethostbyname_r(const char *name, struct hostent *hp, struct hostent_data *hdata)"
+ "0"
+ "netdb.h"
+ EVENT__HAVE_GETHOSTBYNAME_R_3_ARG)
+
+ CHECK_PROTOTYPE_DEFINITION(gethostbyname_r
+ "struct hostent *gethostbyname_r(const char *name, struct hostent *hp, char *buf, size_t buflen, int *herr)"
+ "NULL"
+ "netdb.h"
+ EVENT__HAVE_GETHOSTBYNAME_R_5_ARG)
+
+ CHECK_PROTOTYPE_DEFINITION(gethostbyname_r
+ "int gethostbyname_r(const char *name, struct hostent *hp, char *buf, size_t buflen, struct hostent **result, int *herr)"
+ "0"
+ "netdb.h"
+ EVENT__HAVE_GETHOSTBYNAME_R_6_ARG)
+endif()
+
+if(EVENT__HAVE_PORT_H AND EVENT__HAVE_PORT_CREATE)
+ set(EVENT__HAVE_EVENT_PORTS 1)
+endif()
+
+if(NOT WIN32)
+ CHECK_FUNCTION_EXISTS_EX(select EVENT__HAVE_SELECT)
+endif()
+
+CHECK_TYPE_SIZE("uint8_t" EVENT__HAVE_UINT8_T)
+CHECK_TYPE_SIZE("uint16_t" EVENT__HAVE_UINT16_T)
+CHECK_TYPE_SIZE("uint32_t" EVENT__HAVE_UINT32_T)
+CHECK_TYPE_SIZE("uint64_t" EVENT__HAVE_UINT64_T)
+CHECK_TYPE_SIZE("short" EVENT__SIZEOF_SHORT BUILTIN_TYPES_ONLY)
+CHECK_TYPE_SIZE("int" EVENT__SIZEOF_INT BUILTIN_TYPES_ONLY)
+CHECK_TYPE_SIZE("unsigned" EVENT__SIZEOF_UNSIGNED BUILTIN_TYPES_ONLY)
+CHECK_TYPE_SIZE("unsigned int" EVENT__SIZEOF_UNSIGNED_INT BUILTIN_TYPES_ONLY)
+CHECK_TYPE_SIZE("long" EVENT__SIZEOF_LONG BUILTIN_TYPES_ONLY)
+CHECK_TYPE_SIZE("long long" EVENT__SIZEOF_LONG_LONG BUILTIN_TYPES_ONLY)
+
+if(WIN32)
+ # These aren't available until Windows Vista.
+ # But you can still link them. They just won't be found when running the exe.
+ set(EVENT__HAVE_INET_NTOP 0)
+ set(EVENT__HAVE_INET_PTON 0)
+endif()
+
+# Check for different inline keyword versions.
+check_function_keywords("inline" "__inline" "__inline__")
+
+if (HAVE_INLINE)
+ set (EVENT__inline inline)
+elseif (HAVE___INLINE)
+ set(EVENT__inline __inline)
+elseif(HAVE___INLINE__)
+ set(EVENT__inline __inline__)
+else()
+ set(EVENT__inline)
+endif()
+
+CHECK_SYMBOL_EXISTS(TAILQ_FOREACH sys/queue.h EVENT__HAVE_TAILQFOREACH)
+CHECK_SYMBOL_EXISTS(CTL_KERN sys/sysctl.h EVENT__HAVE_DECL_CTL_KERN)
+CHECK_SYMBOL_EXISTS(KERN_ARND sys/sysctl.h EVENT__HAVE_DECL_KERN_ARND)
+CHECK_SYMBOL_EXISTS(KERN_RANDOM sys/sysctl.h EVENT__HAVE_DECL_KERN_RANDOM)
+CHECK_SYMBOL_EXISTS(RANDOM_UUID sys/sysctl.h EVENT__HAVE_DECL_RANDOM_UUID)
+CHECK_SYMBOL_EXISTS(F_SETFD fcntl.h EVENT__HAVE_SETFD)
+
+CHECK_TYPE_SIZE(fd_mask EVENT__HAVE_FD_MASK)
+
+CHECK_TYPE_SIZE(size_t EVENT__SIZEOF_SIZE_T)
+if(NOT EVENT__SIZEOF_SIZE_T)
+ set(EVENT__size_t "unsigned")
+ set(EVENT__SIZEOF_SIZE_T ${EVENT__SIZEOF_UNSIGNED})
+else()
+ set(EVENT__size_t size_t)
+endif()
+
+CHECK_TYPE_SIZE("off_t" EVENT__SIZEOF_OFF_T LANGUAGE C)
+
+
+# XXX we should functionalize these size and type sets. --elley
+
+# Winsock.
+if (MSVC)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES BaseTsd.h)
+endif()
+CHECK_TYPE_SIZE("ssize_t" EVENT__SIZEOF_SSIZE_T_LOWER LANGUAGE C)
+CHECK_TYPE_SIZE("SSIZE_T" EVENT__SIZEOF_SSIZE_T_UPPER LANGUAGE C)
+
+if (EVENT__SIZEOF_SSIZE_T_LOWER)
+ set(EVENT__ssize_t "ssize_t")
+elseif (EVENT__SIZEOF_SSIZE_T_UPPER)
+ set(EVENT__ssize_t "SSIZE_T")
+else()
+ set(EVENT__ssize_t "int")
+ set(EVENT__SIZEOF_SSIZE_T ${EVENT__SIZEOF_INT})
+endif()
+
+
+CHECK_TYPE_SIZE(socklen_t EVENT__SIZEOF_SOCKLEN_T)
+if(NOT EVENT__SIZEOF_SOCKLEN_T)
+ set(EVENT__socklen_t "unsigned int")
+ set(EVENT__SIZEOF_SOCKLEN_T ${EVENT__SIZEOF_UNSIGNED_INT})
+else()
+ set(EVENT__socklen_t "socklen_t")
+endif()
+
+CHECK_TYPE_SIZE(pid_t EVENT__SIZEOF_PID_T)
+if(NOT EVENT__SIZEOF_PID_T)
+ set(EVENT__pid_t "int")
+ set(EVENT__SIZEOF_PID_T ${EVENT__SIZEOF_INT})
+else()
+ set(EVENT__pid_t "pid_t")
+ set(EVENT__SIZEOF_PID_T EVENT__SIZEOF_PID_T)
+endif()
+
+if (NOT EVENT__DISABLE_THREAD_SUPPORT)
+ CHECK_TYPE_SIZE(pthread_t EVENT__SIZEOF_PTHREAD_T)
+endif()
+
+if(EVENT__HAVE_CLOCK_GETTIME)
+ set(EVENT__DNS_USE_CPU_CLOCK_FOR_ID 1)
+endif()
+
+# we're just getting lazy now.
+CHECK_TYPE_SIZE("uintptr_t" EVENT__HAVE_UINTPTR_T)
+CHECK_TYPE_SIZE("void *" EVENT__SIZEOF_VOID_P)
+
+# Tests file offset bits.
+# TODO: Add AIX test for if -D_LARGE_FILES is needed.
+
+# XXX: Why is this here? We don't even use it, and on top of that,
+# why is it set in config.h?! IT_MAKES_NO_SENSE
+# I'm commenting it out for now.
+# - ellzey
+
+#CHECK_FILE_OFFSET_BITS()
+#set(EVENT___FILE_OFFSET_BITS _FILE_OFFSET_BITS)
+
+# Verify kqueue works with pipes.
+if (EVENT__HAVE_KQUEUE)
+ if (CMAKE_CROSSCOMPILING AND NOT EVENT__FORCE_KQUEUE_CHECK)
+ message(WARNING "Cannot check if kqueue works with pipes when crosscompiling, use EVENT__FORCE_KQUEUE_CHECK to be sure (this requires manually running a test program on the cross compilation target)")
+ set(EVENT__HAVE_WORKING_KQUEUE 1)
+ else()
+ message(STATUS "Checking if kqueue works with pipes...")
+ include(CheckWorkingKqueue)
+ endif()
+endif()
+
+CHECK_SYMBOL_EXISTS(_MINIX "stdio.h" EVENT___MINIX)
+CHECK_SYMBOL_EXISTS(_POSIX_1_SOURCE "stdio.h" EVENT___POSIX_1_SOURCE)
+CHECK_SYMBOL_EXISTS(_POSIX_SOURCE "stdio.h" EVENT___POSIX_SOURCE)
+
+if(EVENT__HAVE_NETDB_H)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES netdb.h)
+ CHECK_TYPE_SIZE("struct addrinfo" EVENT__HAVE_STRUCT_ADDRINFO)
+elseif(WIN32)
+ list(APPEND CMAKE_EXTRA_INCLUDE_FILES ws2tcpip.h)
+ CHECK_TYPE_SIZE("struct addrinfo" EVENT__HAVE_STRUCT_ADDRINFO)
+endif()
+
+# Check for sockaddr structure sizes.
+set(SOCKADDR_HEADERS)
+if (WIN32)
+ set(CMAKE_REQUIRED_DEFINITIONS "-DWIN32_LEAN_AND_MEAN")
+ if (MSVC_VERSION LESS 1300)
+ set(SOCKADDR_HEADERS winsock.h)
+ else()
+ set(SOCKADDR_HEADERS winsock2.h ws2tcpip.h)
+ endif()
+else()
+ if (EVENT__HAVE_NETINET_IN_H)
+ set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netinet/in.h)
+ endif()
+
+ if (EVENT__HAVE_NETINET_IN6_H)
+ set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netinet/in6.h)
+ endif()
+
+ if (EVENT__HAVE_SYS_SOCKET_H)
+ set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} sys/socket.h)
+ endif()
+
+ if (EVENT__HAVE_NETDB_H)
+ set(SOCKADDR_HEADERS ${SOCKADDR_HEADERS} netdb.h)
+ endif()
+endif()
+
+CHECK_TYPE_SIZE("struct in6_addr" EVENT__HAVE_STRUCT_IN6_ADDR)
+if(EVENT__HAVE_STRUCT_IN6_ADDR)
+ CHECK_STRUCT_HAS_MEMBER("struct in6_addr"
+ s6_addr16 "${SOCKADDR_HEADERS}"
+ EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16)
+
+ CHECK_STRUCT_HAS_MEMBER("struct in6_addr"
+ s6_addr32 "${SOCKADDR_HEADERS}"
+ EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32)
+endif()
+
+CHECK_TYPE_SIZE("sa_family_t" EVENT__HAVE_SA_FAMILY_T)
+CHECK_TYPE_SIZE("struct sockaddr_in6" EVENT__HAVE_STRUCT_SOCKADDR_IN6)
+
+if(EVENT__HAVE_STRUCT_SOCKADDR_IN6)
+ CHECK_STRUCT_HAS_MEMBER("struct sockaddr_in6"
+ sin6_len "${SOCKADDR_HEADERS}"
+ EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN)
+
+ CHECK_STRUCT_HAS_MEMBER("struct sockaddr_in6"
+ sin_len "${SOCKADDR_HEADERS}"
+ EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN)
+endif()
+
+CHECK_TYPE_SIZE("struct sockaddr_storage" EVENT__HAVE_STRUCT_SOCKADDR_STORAGE)
+if(EVENT__HAVE_STRUCT_SOCKADDR_STORAGE)
+ CHECK_STRUCT_HAS_MEMBER("struct sockaddr_storage"
+ ss_family "${SOCKADDR_HEADERS}"
+ EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY)
+
+ CHECK_STRUCT_HAS_MEMBER("struct sockaddr_storage"
+ __ss_family "${SOCKADDR_HEADERS}" EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY)
+endif()
+
+# Group the source files.
+set(HDR_PRIVATE
+ bufferevent-internal.h
+ changelist-internal.h
+ defer-internal.h
+ epolltable-internal.h
+ evbuffer-internal.h
+ event-internal.h
+ evmap-internal.h
+ evrpc-internal.h
+ evsignal-internal.h
+ evthread-internal.h
+ ht-internal.h
+ http-internal.h
+ iocp-internal.h
+ ipv6-internal.h
+ log-internal.h
+ minheap-internal.h
+ mm-internal.h
+ ratelim-internal.h
+ strlcpy-internal.h
+ util-internal.h
+ evconfig-private.h
+ compat/sys/queue.h)
+
+set(HDR_COMPAT
+ include/evdns.h
+ include/evrpc.h
+ include/event.h
+ include/evhttp.h
+ include/evutil.h)
+
+set(HDR_PUBLIC
+ include/event2/buffer.h
+ include/event2/bufferevent.h
+ include/event2/bufferevent_compat.h
+ include/event2/bufferevent_struct.h
+ include/event2/buffer_compat.h
+ include/event2/dns.h
+ include/event2/dns_compat.h
+ include/event2/dns_struct.h
+ include/event2/event.h
+ include/event2/event_compat.h
+ include/event2/event_struct.h
+ include/event2/http.h
+ include/event2/http_compat.h
+ include/event2/http_struct.h
+ include/event2/keyvalq_struct.h
+ include/event2/listener.h
+ include/event2/rpc.h
+ include/event2/rpc_compat.h
+ include/event2/rpc_struct.h
+ include/event2/tag.h
+ include/event2/tag_compat.h
+ include/event2/thread.h
+ include/event2/util.h
+ include/event2/visibility.h
+ ${PROJECT_BINARY_DIR}/include/event2/event-config.h)
+
+set(SRC_CORE
+ buffer.c
+ bufferevent.c
+ bufferevent_filter.c
+ bufferevent_pair.c
+ bufferevent_ratelim.c
+ bufferevent_sock.c
+ event.c
+ evmap.c
+ evthread.c
+ evutil.c
+ evutil_rand.c
+ evutil_time.c
+ listener.c
+ log.c
+ signal.c
+ strlcpy.c)
+
+if(EVENT__HAVE_SELECT)
+ list(APPEND SRC_CORE select.c)
+endif()
+
+if(EVENT__HAVE_POLL)
+ list(APPEND SRC_CORE poll.c)
+endif()
+
+if(EVENT__HAVE_KQUEUE)
+ list(APPEND SRC_CORE kqueue.c)
+endif()
+
+if(EVENT__HAVE_DEVPOLL)
+ list(APPEND SRC_CORE devpoll.c)
+endif()
+
+if(EVENT__HAVE_EPOLL)
+ list(APPEND SRC_CORE epoll_sub.c epoll.c)
+endif()
+
+if(EVENT__HAVE_EVENT_PORTS)
+ list(APPEND SRC_CORE evport.c)
+endif()
+
+if (NOT EVENT__DISABLE_OPENSSL)
+ find_package(OpenSSL REQUIRED)
+
+ set(EVENT__HAVE_OPENSSL 1)
+
+ message(STATUS "OpenSSL include: ${OPENSSL_INCLUDE_DIR}")
+ message(STATUS "OpenSSL lib: ${OPENSSL_LIBRARIES}")
+
+ include_directories(${OPENSSL_INCLUDE_DIR})
+
+ list(APPEND SRC_CORE bufferevent_openssl.c)
+ list(APPEND HDR_PUBLIC include/event2/bufferevent_ssl.h)
+ list(APPEND LIB_APPS ${OPENSSL_LIBRARIES})
+endif()
+
+if (NOT EVENT__DISABLE_THREAD_SUPPORT)
+ if (WIN32)
+ list(APPEND SRC_CORE evthread_win32.c)
+ else()
+ find_package(Threads REQUIRED)
+ if (NOT CMAKE_USE_PTHREADS_INIT)
+ message(FATAL_ERROR
+ "Failed to find Pthreads, set EVENT__DISABLE_THREAD_SUPPORT to disable")
+ endif()
+
+ set(EVENT__HAVE_PTHREADS 1)
+ list(APPEND SRC_CORE evthread_pthread.c)
+ list(APPEND LIB_APPS ${CMAKE_THREAD_LIBS_INIT})
+ endif()
+endif()
+
+if (NOT EVENT__DISABLE_TESTS)
+ # Zlib is only used for testing.
+ find_package(ZLIB)
+
+ if (ZLIB_LIBRARY)
+ include_directories(${ZLIB_INCLUDE_DIRS})
+
+ set(EVENT__HAVE_ZLIB 1)
+ set(EVENT__HAVE_ZLIB_H 1)
+ list(APPEND LIB_APPS ${ZLIB_LIBRARIES})
+ endif()
+endif()
+
+set(SRC_EXTRA
+ event_tagging.c
+ http.c
+ evdns.c
+ evrpc.c)
+
+add_definitions(-DHAVE_CONFIG_H)
+
+# We use BEFORE here so we don't accidentally look in system directories
+# first and pick up previously installed versions of the headers.
+include_directories(BEFORE ${PROJECT_SOURCE_DIR}
+ ${PROJECT_SOURCE_DIR}/compat
+ ${PROJECT_SOURCE_DIR}/include)
+
+if(WIN32)
+ list(APPEND SRC_CORE
+ buffer_iocp.c
+ bufferevent_async.c
+ event_iocp.c
+ evthread_win32.c
+ win32select.c)
+
+ list(APPEND HDR_PRIVATE WIN32-Code/getopt.h)
+
+ set(EVENT__DNS_USE_FTIME_FOR_ID 1)
+ set(LIB_PLATFORM ws2_32)
+ add_definitions(
+ -D_CRT_SECURE_NO_WARNINGS
+ -D_CRT_NONSTDC_NO_DEPRECATE)
+
+ include_directories(./WIN32-Code)
+endif()
+
+if (UNIX)
+ list(APPEND LIB_PLATFORM m)
+endif()
+
+source_group("Headers Private" FILES ${HDR_PRIVATE})
+source_group("Header Compat" FILES ${HDR_COMPAT})
+source_group("Headers Public" FILES ${HDR_PUBLIC})
+source_group("Source Core" FILES ${SRC_CORE})
+source_group("Source Extra" FILES ${SRC_EXTRA})
+
+# Generate the configure headers.
+# (Place them in the build dir so we don't pollute the source tree with generated files).
+include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/include)
+
+if (EVENT__BUILD_SHARED_LIBRARIES)
+ set(EVENT__LIBRARY_TYPE SHARED)
+
+ if ((CMAKE_COMPILER_IS_GNUCC) OR (${CMAKE_C_COMPILER_ID} STREQUAL "Clang"))
+ add_compiler_flags(-fvisibility=hidden)
+ elseif ("${CMAKE_C_COMPILER_ID}" STREQUAL "SunPro")
+ add_compiler_flags(-xldscope=hidden)
+ endif()
+
+ set(EVENT__NEED_DLLIMPORT 1)
+else (EVENT__BUILD_SHARED_LIBRARIES)
+ set(EVENT__LIBRARY_TYPE STATIC)
+endif (EVENT__BUILD_SHARED_LIBRARIES)
+
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/event-config.h.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/include/event2/event-config.h
+ NEWLINE_STYLE UNIX)
+
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/evconfig-private.h.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/include/evconfig-private.h)
+
+#
+# Create the libraries.
+#
+
+# TODO: Add dynamic versions of the libraries as well.
+add_library(event_core ${EVENT__LIBRARY_TYPE}
+ ${HDR_PRIVATE}
+ ${HDR_COMPAT}
+ ${HDR_PUBLIC}
+ ${SRC_CORE})
+
+add_library(event_extra ${EVENT__LIBRARY_TYPE}
+ ${HDR_PRIVATE}
+ ${HDR_COMPAT}
+ ${HDR_PUBLIC}
+ ${SRC_CORE}
+ ${SRC_EXTRA})
+
+# The "event" library exists for historical reasons; it contains the contents of
+# both libevent_core and libevent_extra. You shouldn’t use it; it may
+# go away in a future version of Libevent.
+add_library(event ${EVENT__LIBRARY_TYPE}
+ ${HDR_PRIVATE}
+ ${HDR_COMPAT}
+ ${HDR_PUBLIC}
+ ${SRC_CORE}
+ ${SRC_EXTRA})
+
+if (EVENT__BUILD_SHARED_LIBRARIES)
+ # Prepare static library to be linked to tests that need hidden symbols
+ add_library(event_extra_static STATIC
+ ${HDR_PRIVATE}
+ ${HDR_COMPAT}
+ ${HDR_PUBLIC}
+ ${SRC_CORE}
+ ${SRC_EXTRA})
+
+ set(EVENT_EXTRA_FOR_TEST event_extra_static)
+
+ target_link_libraries(event_core ${OPENSSL_LIBRARIES}
+ ${CMAKE_THREAD_LIBS_INIT}
+ ${LIB_PLATFORM})
+
+ target_link_libraries(event ${OPENSSL_LIBRARIES}
+ ${CMAKE_THREAD_LIBS_INIT}
+ ${LIB_PLATFORM})
+
+ target_link_libraries(event_extra ${OPENSSL_LIBRARIES}
+ ${CMAKE_THREAD_LIBS_INIT}
+ ${LIB_PLATFORM})
+
+ set_target_properties(event
+ PROPERTIES SOVERSION
+ ${EVENT_ABI_LIBVERSION})
+
+ set_target_properties(event_core
+ PROPERTIES SOVERSION
+ ${EVENT_ABI_LIBVERSION})
+
+ set_target_properties(event_extra
+ PROPERTIES SOVERSION
+ ${EVENT_ABI_LIBVERSION})
+
+else (EVENT__BUILD_SHARED_LIBRARIES)
+ set(EVENT_EXTRA_FOR_TEST event_extra)
+endif (EVENT__BUILD_SHARED_LIBRARIES)
+
+#
+# Samples.
+#
+
+if (NOT EVENT__DISABLE_SAMPLES)
+ set(SAMPLES
+ dns-example
+ event-read-fifo
+ hello-world
+ signal-test
+ http-server
+ http-connect
+ time-test)
+
+ if (NOT EVENT__DISABLE_OPENSSL AND OPENSSL_LIBRARIES)
+ set(CMAKE_REQUIRED_LIBRARIES ${OPENSSL_LIBRARIES})
+ CHECK_FUNCTION_EXISTS_EX(ERR_remove_thread_state EVENT__HAVE_ERR_REMOVE_THREAD_STATE)
+ set(CMAKE_REQUIRED_LIBRARIES "")
+
+ # Special sample with more than one file.
+ add_executable(https-client
+ sample/https-client.c
+ sample/openssl_hostname_validation.c
+ sample/hostcheck.c)
+
+ target_link_libraries(https-client
+ event_extra
+ ${LIB_APPS}
+ ${LIB_PLATFORM})
+
+ add_dependencies(https-client event_extra)
+
+ # Requires OpenSSL.
+ list(APPEND SAMPLES le-proxy)
+ endif()
+
+ foreach(SAMPLE ${SAMPLES})
+ add_executable(${SAMPLE}
+ sample/${SAMPLE}.c)
+
+ target_link_libraries(${SAMPLE}
+ event_extra
+ ${LIB_APPS}
+ ${LIB_PLATFORM})
+
+ add_dependencies(${SAMPLE} event_extra)
+ endforeach()
+
+ if (WIN32)
+ target_sources(dns-example PUBLIC
+ WIN32-Code/getopt.c
+ WIN32-Code/getopt_long.c)
+ endif()
+endif()
+
+if (NOT EVENT__DISABLE_BENCHMARK)
+ foreach (BENCHMARK bench bench_cascade bench_http bench_httpclient)
+ set(BENCH_SRC test/${BENCHMARK}.c)
+
+ if (WIN32)
+ list(APPEND BENCH_SRC
+ WIN32-Code/getopt.c
+ WIN32-Code/getopt_long.c)
+ endif()
+
+ add_executable(${BENCHMARK} ${BENCH_SRC})
+
+ target_link_libraries(${BENCHMARK}
+ event_extra
+ ${LIB_PLATFORM})
+
+ add_dependencies(${BENCHMARK} event_extra)
+ endforeach()
+endif()
+
+if (NOT EVENT__DISABLE_TESTS)
+ #
+ # Generate Regress tests.
+ #
+ if (NOT EVENT__DISABLE_REGRESS)
+
+ # (We require python to generate the regress tests)
+ find_package(PythonInterp)
+
+ if (PYTHONINTERP_FOUND AND PYTHON_VERSION_STRING VERSION_LESS "3.0.0")
+ set(__FOUND_USABLE_PYTHON 1)
+ endif()
+
+ if (__FOUND_USABLE_PYTHON)
+ message(STATUS "Generating regress tests...")
+
+ add_definitions(-DTINYTEST_LOCAL)
+
+ add_custom_command(
+ OUTPUT
+ ${CMAKE_CURRENT_SOURCE_DIR}/test/regress.gen.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/test/regress.gen.h
+ DEPENDS
+ event_rpcgen.py
+ test/regress.rpc
+ COMMAND ${PYTHON_EXECUTABLE} ../event_rpcgen.py regress.rpc
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/test)
+
+ list(APPEND SRC_REGRESS
+ test/regress.c
+ test/regress.gen.c
+ test/regress.gen.h
+ test/regress_buffer.c
+ test/regress_bufferevent.c
+ test/regress_dns.c
+ test/regress_et.c
+ test/regress_finalize.c
+ test/regress_http.c
+ test/regress_listener.c
+ test/regress_main.c
+ test/regress_minheap.c
+ test/regress_rpc.c
+ test/regress_testutils.c
+ test/regress_testutils.h
+ test/regress_util.c
+ test/tinytest.c
+ ${SRC_CORE}
+ ${SRC_EXTRA})
+
+ if (WIN32)
+ list(APPEND SRC_REGRESS test/regress_iocp.c)
+ list(APPEND SRC_REGRESS test/regress_thread.c)
+ endif()
+
+ if (CMAKE_USE_PTHREADS_INIT)
+ list(APPEND SRC_REGRESS test/regress_thread.c)
+ endif()
+
+ if (ZLIB_LIBRARY)
+ list(APPEND SRC_REGRESS test/regress_zlib.c)
+ endif()
+
+ if (OPENSSL_LIBRARIES)
+ list(APPEND SRC_REGRESS test/regress_ssl.c)
+ endif()
+
+ add_executable(regress ${SRC_REGRESS})
+
+ # While building the test suite we don't want the visibility
+ # header trying to "dllimport" the symbols on windows (it
+ # generates a ton of warnings due to different link
+ # attributes for all of the symbols)
+ SET_TARGET_PROPERTIES(regress
+ PROPERTIES COMPILE_DEFINITIONS
+ "EVENT_BUILDING_REGRESS_TEST=1")
+
+ target_link_libraries(regress
+ ${LIB_APPS}
+ ${LIB_PLATFORM})
+ else()
+ message(WARNING "No suitable Python interpreter found, cannot generate regress tests!")
+ endif()
+ endif()
+
+ #
+ # Test programs.
+ #
+ # all of these, including this CMakeLists.txt, should be moved
+ # into the directory 'tests' first.
+ #
+ # doing this, we can remove all the DISABLE_TESTS stuff, and simply
+ # do something like:
+ #
+ # add_custom_target(tests)
+ # add_executable(... EXCLUDE_FROM_ALL ...c)
+ # add_dependencies(tests testa testb testc)
+ # add_test(....)
+ #
+ # then you can just run 'make tests' instead of having them all
+ # auto-compiled and run.
+ # - ellzey
+ set(TESTPROGS test-changelist
+ test-eof
+ test-fdleak
+ test-init
+ test-time
+ test-weof)
+
+ set(ALL_TESTPROGS
+ ${TESTPROGS}
+ test-dumpevents
+ test-ratelim)
+
+ # Create test program executables.
+ foreach (TESTPROG ${ALL_TESTPROGS})
+ add_executable(${TESTPROG}
+ test/${TESTPROG}.c)
+
+ target_link_libraries(${TESTPROG}
+ ${EVENT_EXTRA_FOR_TEST}
+ ${LIB_PLATFORM})
+
+ add_dependencies(${TESTPROG}
+ ${EVENT_EXTRA_FOR_TEST})
+ endforeach()
+
+ #
+ # We run all tests with the different backends turned on one at a time.
+ #
+
+ # Add event backends based on system introspection result.
+ set(BACKENDS "")
+
+ if (EVENT__HAVE_EPOLL)
+ list(APPEND BACKENDS EPOLL)
+ endif()
+
+ if (EVENT__HAVE_SELECT)
+ list(APPEND BACKENDS SELECT)
+ endif()
+
+ if (EVENT__HAVE_POLL)
+ list(APPEND BACKENDS POLL)
+ endif()
+
+ if (EVENT__HAVE_KQUEUE)
+ list(APPEND BACKENDS KQUEUE)
+ endif()
+
+ if (EVENT__HAVE_EVENT_PORTS)
+ list(APPEND BACKENDS EVPORT)
+ endif()
+
+ if (EVENT__HAVE_DEVPOLL)
+ list(APPEND BACKENDS DEVPOLL)
+ endif()
+
+ if (WIN32)
+ list(APPEND BACKENDS WIN32)
+ endif()
+
+
+ # Default environment variables turns off all event systems,
+ # then we enable each one, one at a time when creating the tests.
+ set(DEFAULT_TEST_ENV_VARS "EVENT_SHOW_METHOD=1;")
+ foreach(BACKEND ${BACKENDS})
+ set(BACKEND_ENV_VAR "EVENT_NO${BACKEND}=1")
+ list(APPEND DEFAULT_TEST_ENV_VARS "${BACKEND_ENV_VAR}")
+ endforeach()
+
+ # Macro that creates the ctest test for a backend.
+ macro(add_backend_test BACKEND_TEST_NAME ENV_VARS)
+ set(TEST_NAMES "")
+
+ foreach (TESTPROG ${TESTPROGS})
+ set(TEST_NAME ${TESTPROG}__${BACKEND_TEST_NAME})
+
+ add_test(${TEST_NAME}
+ ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TESTPROG})
+
+ list(APPEND TEST_NAMES ${TEST_NAME})
+
+ set_tests_properties(${TEST_NAME}
+ PROPERTIES ENVIRONMENT "${ENV_VARS}")
+ endforeach()
+
+ # Dump events test.
+ if (__FOUND_USABLE_PYTHON)
+ set(TEST_NAME test-dumpevents__${BACKEND_TEST_NAME})
+
+ add_test(${TEST_NAME}
+ ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-dumpevents |
+ ${PYTHON_EXECUTABLE}
+ ${CMAKE_CURRENT_SOURCE_DIR}/test/check-dumpevents.py)
+
+ set_tests_properties(${TEST_NAME}
+ PROPERTIES ENVIRONMENT "${ENV_VARS}")
+ else()
+ message(WARNING "test-dumpevents will be run without output check since python was not found!")
+ set(TEST_NAME test-dumpevents__${BACKEND_TEST_NAME}_no_check)
+
+ add_test(${TEST_NAME}
+ ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-dumpevents)
+
+ set_tests_properties(${TEST_NAME}
+ PROPERTIES ENVIRONMENT "${ENV_VARS}")
+ endif()
+
+ # Regress tests.
+ if (NOT EVENT__DISABLE_REGRESS AND __FOUND_USABLE_PYTHON)
+ set(TEST_NAME regress__${BACKEND_TEST_NAME})
+
+ add_test(${TEST_NAME}
+ ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/regress)
+
+ set_tests_properties(${TEST_NAME}
+ PROPERTIES ENVIRONMENT "${ENV_VARS}")
+
+ add_test(${TEST_NAME}_debug
+ ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/regress)
+
+ set_tests_properties(${TEST_NAME}_debug
+ PROPERTIES ENVIRONMENT "${ENV_VARS};EVENT_DEBUG_MODE=1")
+ endif()
+ endmacro()
+
+ # Add the tests for each backend.
+ foreach(BACKEND ${BACKENDS})
+ # Enable this backend only.
+ set(BACKEND_ENV_VARS ${DEFAULT_TEST_ENV_VARS})
+ list(REMOVE_ITEM BACKEND_ENV_VARS EVENT_NO${BACKEND}=1)
+
+ # Epoll has some extra settings.
+ if (${BACKEND} STREQUAL "EPOLL")
+ add_backend_test(timerfd_${BACKEND}
+ "${BACKEND_ENV_VARS};EVENT_PRECISE_TIMER=1")
+
+ add_backend_test(changelist_${BACKEND}
+ "${BACKEND_ENV_VARS};EVENT_EPOLL_USE_CHANGELIST=yes")
+
+ add_backend_test(timerfd_changelist_${BACKEND}
+ "${BACKEND_ENV_VARS};EVENT_EPOLL_USE_CHANGELIST=yes;EVENT_PRECISE_TIMER=1")
+ else()
+ add_backend_test(${BACKEND} "${BACKEND_ENV_VARS}")
+ endif()
+ endforeach()
+
+ #
+ # Rate limiter tests.
+ #
+
+ # Group limits, no connection limit.
+ set(RL_BIN ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test-ratelim)
+
+ add_test(test-ratelim__group_lim
+ ${RL_BIN}
+ -g 30000
+ -n 30
+ -t 100
+ --check-grouplimit 1000
+ --check-stddev 100)
+
+ # Connection limit, no group limit.
+ add_test(test-ratelim__con_lim
+ ${RL_BIN}
+ -c 1000
+ -n 30
+ -t 100
+ --check-connlimit 50
+ --check-stddev 50)
+
+ # Connection limit and group limit.
+ add_test(test-ratelim__group_con_lim
+ ${RL_BIN}
+ -c 1000
+ -g 30000
+ -n 30
+ -t 100
+ --check-grouplimit 1000
+ --check-connlimit 50
+ --check-stddev 50)
+
+ # Connection limit and group limit with independent drain.
+ add_test(test-ratelim__group_con_lim_drain
+ ${RL_BIN}
+ -c 1000
+ -g 35000
+ -n 30
+ -t 100
+ -G 500
+ --check-grouplimit 1000
+ --check-connlimit 50
+ --check-stddev 50)
+
+ # Add a "make verify" target, same as for autoconf.
+ # (Important! This will unset all EVENT_NO* environment variables.
+ # If they are left set in the shell, tests run with plain "ctest" or "make test" will fail.)
+ if (WIN32)
+ # Windows doesn't have "unset". But you can use "set VAR=" instead.
+ # We need to guard against the possibility that EVENT_NOWIN32 is set, and all tests failing
+ # since no event backend would be available.
+ file(TO_NATIVE_PATH ${CMAKE_CTEST_COMMAND} WINDOWS_CTEST_COMMAND)
+
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.bat
+ "
+ set EVENT_NOWIN32=
+ \"${WINDOWS_CTEST_COMMAND}\"
+ ")
+
+ message(STATUS "${WINDOWS_CTEST_COMMAND}")
+
+ file(COPY ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.bat
+ DESTINATION
+ ${CMAKE_CURRENT_BINARY_DIR}
+ FILE_PERMISSIONS
+ OWNER_READ
+ OWNER_WRITE
+ OWNER_EXECUTE
+ GROUP_READ
+ GROUP_EXECUTE
+ WORLD_READ WORLD_EXECUTE)
+
+ file(TO_NATIVE_PATH
+ "${CMAKE_CURRENT_BINARY_DIR}/verify_tests.bat" VERIFY_PATH)
+
+ add_custom_target(verify COMMAND "${VERIFY_PATH}"
+ DEPENDS event ${ALL_TESTPROGS})
+ else()
+ # On some platforms doing exec(unset) as CMake does won't work, so make sure
+ # we run the unset command in a shell instead.
+ # First we write the script contents.
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.sh
+ "
+ #!/bin/bash
+ unset EVENT_NOEPOLL; unset EVENT_NOPOLL; unset EVENT_NOSELECT; unset EVENT_NOWIN32; unset EVENT_NOEVPORT; unset EVENT_NOKQUEUE; unset EVENT_NODEVPOLL
+ ${CMAKE_CTEST_COMMAND}
+ ")
+
+ # Then we copy the file (this allows us to set execute permission on it)
+ file(COPY ${CMAKE_CURRENT_BINARY_DIR}/tmp/verify_tests.sh
+ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}
+ FILE_PERMISSIONS
+ OWNER_READ
+ OWNER_WRITE
+ OWNER_EXECUTE
+ GROUP_READ
+ GROUP_EXECUTE
+ WORLD_READ
+ WORLD_EXECUTE)
+
+ # Create the target that runs the script.
+ add_custom_target(verify
+ COMMAND
+ ${CMAKE_CURRENT_BINARY_DIR}/verify_tests.sh
+ DEPENDS
+ event
+ ${ALL_TESTPROGS})
+ endif()
+
+ if (NOT EVENT__DISABLE_REGRESS AND __FOUND_USABLE_PYTHON)
+ add_dependencies(verify regress)
+ endif()
+
+ if (EVENT__COVERAGE)
+ include(CodeCoverage)
+
+ setup_target_for_coverage(
+ verify_coverage # Coverage target name "make verify_coverage"
+ make # Test runner.
+ coverage # Output directory.
+ verify) # Arguments passed to test runner. "make verify"
+ endif()
+
+ enable_testing()
+
+ include(CTest)
+endif()
+
+#
+# Installation preparation.
+#
+
+# Allow the user to override installation directories.
+set(EVENT_INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries")
+set(EVENT_INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables")
+set(EVENT_INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files")
+
+if(WIN32 AND NOT CYGWIN)
+ set(DEF_INSTALL_CMAKE_DIR cmake)
+else()
+ set(DEF_INSTALL_CMAKE_DIR lib/cmake/libevent)
+endif()
+
+set(EVENT_INSTALL_CMAKE_DIR ${DEF_INSTALL_CMAKE_DIR} CACHE PATH "Installation directory for CMake files")
+
+# Make sure the paths are absolute.
+foreach(p LIB BIN INCLUDE CMAKE)
+ set(var EVENT_INSTALL_${p}_DIR)
+ if(NOT IS_ABSOLUTE "${${var}}")
+ set(${var} "${CMAKE_INSTALL_PREFIX}/${${var}}")
+ endif()
+endforeach()
+
+# Export targets (This is used for other CMake projects to easily find the libraries and include files).
+export(TARGETS event event_extra event_core
+ FILE "${PROJECT_BINARY_DIR}/LibeventTargets.cmake")
+export(PACKAGE libevent)
+
+# Generate the config file for the build-tree.
+set(EVENT__INCLUDE_DIRS
+ "${PROJECT_SOURCE_DIR}/include"
+ "${PROJECT_BINARY_DIR}/include")
+
+set(LIBEVENT_INCLUDE_DIRS
+ ${EVENT__INCLUDE_DIRS}
+ CACHE PATH "Libevent include directories")
+
+configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfigBuildTree.cmake.in
+ ${PROJECT_BINARY_DIR}/LibeventConfig.cmake
+ @ONLY)
+
+# Generate the config file for the installation tree.
+file(RELATIVE_PATH
+ REL_INCLUDE_DIR
+ "${EVENT_INSTALL_CMAKE_DIR}"
+ "${EVENT_INSTALL_INCLUDE_DIR}") # Calculate the relative directory from the Cmake dir.
+
+# Note that EVENT_CMAKE_DIR is defined in LibeventConfig.cmake.in;
+# we escape it here so it is evaluated when that file is included,
+# so that the include dirs are given relative to where the
+# config file is located.
+set(EVENT__INCLUDE_DIRS
+ "\${EVENT_CMAKE_DIR}/${REL_INCLUDE_DIR}")
+
+configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfig.cmake.in
+ ${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/LibeventConfig.cmake
+ @ONLY)
+
+# Generate version info for both build-tree and install-tree.
+configure_file(${PROJECT_SOURCE_DIR}/cmake/LibeventConfigVersion.cmake.in
+ ${PROJECT_BINARY_DIR}/LibeventConfigVersion.cmake
+ @ONLY)
+
+# Define the public headers.
+set_target_properties(event event_core event_extra
+ PROPERTIES PUBLIC_HEADER "${HDR_PUBLIC}")
+
+#
+# Install targets.
+#
+install(TARGETS event event_core event_extra
+ EXPORT LibeventTargets
+ RUNTIME DESTINATION "${EVENT_INSTALL_BIN_DIR}" COMPONENT bin
+ LIBRARY DESTINATION "${EVENT_INSTALL_LIB_DIR}" COMPONENT lib
+ ARCHIVE DESTINATION "${EVENT_INSTALL_LIB_DIR}" COMPONENT lib
+ PUBLIC_HEADER DESTINATION "${EVENT_INSTALL_INCLUDE_DIR}/event2" COMPONENT dev)
+
+# Install compat headers
+install(FILES ${HDR_COMPAT}
+ DESTINATION
+ "${EVENT_INSTALL_INCLUDE_DIR}"
+ COMPONENT dev)
+
+# Install the configs.
+install(FILES
+ ${PROJECT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/LibeventConfig.cmake
+ ${PROJECT_BINARY_DIR}/LibeventConfigVersion.cmake
+ DESTINATION
+ "${EVENT_INSTALL_CMAKE_DIR}"
+ COMPONENT dev)
+
+# Install exports for the install-tree.
+install(EXPORT LibeventTargets
+ DESTINATION
+ "${EVENT_INSTALL_CMAKE_DIR}"
+ COMPONENT dev)
+
+set(LIBEVENT_LIBRARIES
+ event
+ event_core
+ event_extra
+ CACHE STRING "Libevent libraries")
+
+message("")
+message(" ---( Libevent " ${EVENT_VERSION} " )---")
+message("")
+message(STATUS "Available event backends: ${BACKENDS}")
+message(STATUS "CMAKE_BINARY_DIR: " ${CMAKE_BINARY_DIR})
+message(STATUS "CMAKE_CURRENT_BINARY_DIR: " ${CMAKE_CURRENT_BINARY_DIR})
+message(STATUS "CMAKE_SOURCE_DIR: " ${CMAKE_SOURCE_DIR})
+message(STATUS "CMAKE_CURRENT_SOURCE_DIR: " ${CMAKE_CURRENT_SOURCE_DIR})
+message(STATUS "PROJECT_BINARY_DIR: " ${PROJECT_BINARY_DIR})
+message(STATUS "PROJECT_SOURCE_DIR: " ${PROJECT_SOURCE_DIR})
+message(STATUS "CMAKE_MODULE_PATH: " ${CMAKE_MODULE_PATH})
+message(STATUS "CMAKE_COMMAND: " ${CMAKE_COMMAND})
+message(STATUS "CMAKE_ROOT: " ${CMAKE_ROOT} )
+message(STATUS "CMAKE_SYSTEM: " ${CMAKE_SYSTEM} )
+message(STATUS "CMAKE_SYSTEM_NAME: " ${CMAKE_SYSTEM_NAME} )
+message(STATUS "CMAKE_SYSTEM_VERSION: " ${CMAKE_SYSTEM_VERSION} )
+message(STATUS "CMAKE_SYSTEM_PROCESSOR: " ${CMAKE_SYSTEM_PROCESSOR} )
+message(STATUS "CMAKE_SKIP_RPATH: " ${CMAKE_SKIP_RPATH} )
+message(STATUS "CMAKE_VERBOSE_MAKEFILE: " ${CMAKE_VERBOSE_MAKEFILE} )
+message(STATUS "CMAKE_C_FLAGS: " ${CMAKE_C_FLAGS} )
+message(STATUS "CMAKE_BUILD_TYPE: " ${CMAKE_BUILD_TYPE} )
+message(STATUS "CMAKE_C_COMPILER: " ${CMAKE_C_COMPILER} )
+message(STATUS "CMAKE_AR: " ${CMAKE_AR} )
+message(STATUS "CMAKE_RANLIB: " ${CMAKE_RANLIB} )
+message("")
+
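To make the test plumbing in the CMakeLists.txt above concrete, here is a small sketch of driving the suite by hand from a configured Unix build tree. It assumes the default CMAKE_RUNTIME_OUTPUT_DIRECTORY of bin/ set near the top of the file; test-time is one of the TESTPROGS listed above, and the "poll only, on a Linux host" scenario in the second example is hypothetical:

    # Run everything the way the "verify" custom target does: clear any
    # EVENT_NO* overrides so each generated per-backend ctest case controls
    # its own environment, then invoke ctest through the target.
    unset EVENT_NOEPOLL EVENT_NOPOLL EVENT_NOSELECT EVENT_NOKQUEUE \
          EVENT_NOEVPORT EVENT_NODEVPOLL EVENT_NOWIN32
    cmake --build . --target verify

    # Or pin a single backend for one test program, mirroring the ENVIRONMENT
    # property that add_backend_test() sets on each ctest case (here: poll
    # only, on a Linux host where epoll and select were also detected).
    EVENT_SHOW_METHOD=1 EVENT_NOEPOLL=1 EVENT_NOSELECT=1 ./bin/test-time
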
diff --git a/libs/libevent/docs/ChangeLog b/libs/libevent/docs/ChangeLog
new file mode 100644
index 0000000000..1499637504
--- /dev/null
+++ b/libs/libevent/docs/ChangeLog
@@ -0,0 +1,1402 @@
+Changes in version 2.1.5-beta (5 January 2015)
+
+ Security Fixes (evbuffers)
+ o Avoid integer overflow bugs in evbuffer_add() and related functions. See CVE-2014-6272 advisory for more information. (d49bc0e88b81a5812116074dc007f1db0ca1eecd)
+
+ New APIs (evconnlistener)
+ o Provide support for SO_REUSEPORT through LEV_OPT_REUSABLE_PORT (b625361 Maciej Soltysiak)
+
+ Bugfixes (core)
+ o Fix use-after-free error in EV_CLOSURE_EVENT callback (3cc0eac John Ohl)
+ o Fix race caused by event_active (3c7d6fc vjpai)
+
+ Bugfixes (evbuffer)
+ o Fix evbuffer_peek() with len==-1 and start_at non-NULL. (ba59923)
+ o Consistently check for failure from evbuffer_pullup() (60f8f72)
+ o Fix evbuffer_peek() with len==-1 and start_at non-NULL. (fb7e76a)
+
+ Bugfixes (windows, IOCP)
+ o be async: avoid double close() (f133b86 Azat Khuzhin)
+
+ Bugfixes (bufferevents)
+ o Fix issue #127, double free for filterevents that use BEV_OPT_CLOSE_ON_FREE (2c82aa0 John Ohl)
+ o make bufferevent_getwatermark api more robust (a21e510 ufo2243)
+ o [Bugfix] fix bufferevent setwatermark suspend_read (b34e4ac ufo2243)
+ o bufferevent_openssl: reset fd_is_set when setfd with -1 is called (3da84c2 Azat Khuzhin)
+ o Fix compilation for older OpenSSL versions. (5c7282f Joakim Soderberg)
+
+ New APIs (evhttp)
+ o Add evhttp_connection_set_family() to set addrinfo->family for DNS requests (12c29b0 Azat Khuzhin)
+ o Implement interface that provides the ability to have an outbound evhttp_connection free itself once all requests have completed (2b9ec4c,10fe4f John Ohl)
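+
+  For illustration only (not part of the release log), a minimal sketch of
+  the two new evhttp calls above; "base", the host, and the port are
+  placeholders, and error handling is omitted:
+
+      #include <event2/event.h>
+      #include <event2/http.h>
+      #include <sys/socket.h>   /* AF_INET */
+
+      static void setup_outbound_connection(struct event_base *base)
+      {
+          struct evhttp_connection *conn =
+              evhttp_connection_base_new(base, NULL, "example.com", 80);
+          /* Restrict DNS lookups for this connection to IPv4. */
+          evhttp_connection_set_family(conn, AF_INET);
+          /* Have the connection free itself once all its requests finish. */
+          evhttp_connection_free_on_completion(conn);
+      }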
+
+ New APIs (core)
+ o Implement new/free for struct evutil_monotonic_timer and export monotonic time functions (f2645f8 Andrea Shepard)
+
+ Bugfixes (evdns)
+ o Load hosts file on Windows. (a0b247c Vilmos Nebehaj)
+ o Don't truncate hosts file path on Windows. (d0dc861 Vilmos Nebehaj)
+ o Fix a crash in evdns related to shutting down evdns (9f39c88,e8fe749)
+ o evdns: avoid read-after-free in evdns_request_timeout_callback() (61262a0 Azat Khuzhin)
+ o Correctly handle allocation failures in evdns_getaddrinfo (6a53d15)
+ o evdns: fix EVDNS_BASE_DISABLE_WHEN_INACTIVE in case retransmit/retry (74d0eee Azat Khuzhin)
+ o evdns: add retry/reissue tests for EVDNS_BASE_DISABLE_WHEN_INACTIVE (3ca9d43 Azat Khuzhin)
+ o evdns: fail ns after we are failing/retrasmitting request (97c750d Azat Khuzhin)
+
+ Bugfixes (evhttp)
+ o http: reset connection before installing retry timer (fix http retries handling) (bc79cc5 Azat Khuzhin)
+
+
+ Testing
+ o regress_dns: fix leaks in getaddrinfo_async{,_cancel_stress} tests (2fdc5f2 Azat Khuzhin)
+ o test: add family argument for http_connection_test_() (177b8a7 Azat Khuzhin)
+ o test: add regress for evhttp_connection_set_family() with AF_INET and AF_UNSPEC (42aefeb Azat Khuzhin)
+ o test/http: add regress test for set family to AF_INET6 (3fbf3cc Azat Khuzhin)
+ o Update to a more recent tinytest_macros. (8da5a18)
+ o test/regress: add simplestsignal: to track reorder bugs separately (b897bef Azat Khuzhin)
+ o test/evbuffer_peek: add regress in case we have first buffer greater (e2d139d Azat Khuzhin)
+ o More evbuffer_peek() test cases (154006a)
+ o use correct tt macro for pointer compare (08c88ea)
+ o regress_buffer: fix 'memcmp' compare size (79800df Maks Naumov)
+ o Fix a use-after-free in unit tests. CID 752027 (3739057)
+ o Fix a dead-code warning in unit tests. CID 1193548 (c119f24)
+ o Use evutil_weakrand() in unit tests. (a677b72, 364c110)
+ o Use a more precise calculation for max in time-ratelim.c (ca5b5c7)
+ o Make a buffer larger in the tests to avoid a scary evbuffer_copyout_from() (fb57b8b)
+ o Fix several memory leaks in the unit tests. (89c1a3b)
+ o Add test for evhttp_connection_free_on_completion (b0e9924 John Ohl)
+ o Fix annoying heisenbug in test-time.c (cb73704)
+
+ Sample code
+ o Make http-server.c output into good html5 (6d72bdc)
+ o Use FindClose for handle from FindFirstFile in http-server.c (6466e88)
+ o https-client: add -retries argument, for connection retries (d9da844 Azat Khuzhin)
+
+ Bugfixes (build)
+ o Add missing headerfile for cmake (15d90cc Trond Norbye)
+ o ignore one more test binary (b6593aa Michael Richardson)
+ o ignore config.cache/test-driver files (c83f333 Mike Frysinger)
+ o add a --disable-samples configure flag (0c492b3 Mike Frysinger)
+ o Add a few files created by "make verify" to .gitignore. (1a8295a Pierre Phaneuf)
+ o updates in cmake build (27bd9fa Sergey Nikulov)
+ o Fix cmake error when the Module path has more than one entry. (befbd13 Acer Yang)
+ o Fix CMake shared library build (e69d910 Nobuaki Sukegawa)
+ o Fix warnings when compiling with clang 3.5 (f5b4765 John Ohl)
+ o Fix mixed declarations and code (forbidden by ISO C90) (8afbdbc Thomas Bernard)
+
+ Bugfixes (miscellaneous)
+ o tree.h: drop duplicated content of tree.h (6193187 Azat Khuzhin)
+ o evdns: disable probing with EVDNS_BASE_DISABLE_WHEN_INACTIVE (610410b,ad0493e,fea86a6,d83b337,5ca9e97 Azat Khuzhin)
+ o [Bugfix] fix grammer error (3a4d249 ufo2243)
+ o Change return type of evutil_load_windows_system_library_ to HMODULE (f691389)
+ o Fix a c90 warning (76643dd)
+ o Fix a typo in a doxygen comment. Reported by 亦得. (be1aeff)
+ o remove trailing comma from enum (b361b8a Jean-Philippe Ouellet)
+
+ Bugfixes (FreeBSD)
+ o Handle ENOTCAPABLE from FreeBSD - this is returned if an event in the changelist is for an FD that has been closed. (6fd7394 Adrian Chadd)
+
+
+
+Changes in version 2.1.4-alpha (21 Mar 2014)
+
+ Libevent 2.1.4-alpha adds a number of new miscellaneous APIs to make
+ Libevent more useful, including support for early close detection with
+ epoll via EPOLLRDHUP, triggering bufferevent callbacks, adding more
+ evhttp callbacks, and more. There are also numerous bugfixes, including
+ a number for finalize-related issues from 2.1.3-alpha; and an
+ alternative (non-primary!) cmake-based build mechanism.
+
+ New APIs (core)
+ o Added event_base_get_num_events() (0fa107d Mobai Zhang)
+ o New event_base_active_by_fd API (865a142 Greg Hazel, 5c9da9a, 87fa2b0)
+ o Add event_base_active_by_signal by analogy (4865943)
+ o Add access to max event count stats (5173bef, efbd3dc, 26230a2
+ Andrew Sweeney)
+ o Implemented EV_CLOSED event for epoll backend
+ (EPOLLRDHUP). (b1b69ac Diego Giagio, 53d2793, 43ffcf6, dfe1e52
+ Marcin Juszkiewicz, ff26633 Joakim Soderberg, 3908a5e)
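+
+  For illustration only, a minimal sketch of asking for the new EV_CLOSED
+  notification; it is honored only on backends that support early close
+  (e.g. epoll via EPOLLRDHUP), and "fd" and "on_closed" are placeholders:
+
+      #include <event2/event.h>
+
+      static void on_closed(evutil_socket_t fd, short what, void *arg)
+      {
+          /* "what" includes EV_CLOSED when the peer has closed the socket. */
+      }
+
+      static void watch_for_close(struct event_base *base, evutil_socket_t fd)
+      {
+          struct event *ev = event_new(base, fd, EV_CLOSED | EV_PERSIST,
+                                       on_closed, NULL);
+          event_add(ev, NULL);
+      }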
+
+ New APIs (evutil_secure_rng)
+ o Add evutil_secure_rng_set_urandom_device_file (2bbb5d7)
+
+ New APIs (bufferevents)
+ o Add function to fetch underlying ratelimit cfg (4b3d5af Mark Ellzey)
+ o Pass and return const for bufferevent_get_token_bucket_cfg (1c77fbb
+ Mark Ellzey)
+ o Add watermark introspection (4ce242b Ondřej Kuzník)
+ o Add an option to trigger bufferevent I/O callbacks (61ee18b Ondřej Kuzník)
+ o Add an option to trigger bufferevent event callbacks (a7384c7
+ Ondřej Kuzník)
+ o Clarifications in response to merge req. comments (bd41947 Ondřej
+ Kuzník)
+ o Minor optimizations on bufferevent_trigger options (a3172a4)
+
+ New APIs (evhttp)
+ o Add evhttp_connection_get_server(). (a7f82a3 Maxime Henrion)
+ o add a http default content type option (5a5acd9 Nicolas Martyanoff)
+ o http: implement new evhttp_connection_get_addr() api. (0c7f040 Azat
+ Khuzhin)
+ o Add a variant of evhttp_send_reply_chunk() with a callback on
+ evhttp_write_buffer() (8d8decf Julien BLACHE)
+ o Allow registering callback for parsing HTTP headers (b0bd7fe Balint Reczey)
+ o Provide on request complete callback facility (b083ca0 Andrew Sweeney)
+ o evhttp_request_set_on_complete_cb to be more specific about what
+ the function actually does and usage (da86dda Andrew Sweeney)
+ o Update unit test to make sure that the callback happens after the
+ output data is written (b85f398 Andrew Sweeney)
+
+ Features (evdns)
+ o bug fix for issues #293 evdns_base_load_hosts doesn't remove
+ outdated addresses (954d2f9, f03d353, 45eba6f Kuldeep Gupta)
+
+ Features: (cmake build support)
+ o Initial CMake commit. (e415196 Joakim Soderberg)
+ o Add all tests and benchmarks to CMake project. (e9fc014 Joakim Soderberg)
+ o More work on adding tests to CMake project (99c1dc3 Joakim Soderberg)
+ o Generate a dummy evconfig-private.h so things build
+ properly. (ce14def Joakim Soderberg)
+ o Link libm on unix platforms. (58fcd42 Joakim Soderberg)
+ o Added some GCC specific options. (19222e5 Joakim Soderberg)
+ o Use evutil_closesocket instead. (dbf2b51 Joakim Soderberg)
+ o Add copyright and licensing files for CMake modules. (c259d53
+ Joakim Soderberg)
+ o Only include WIN32 getopt where it is used. (9bbce0b Joakim Soderberg)
+ o Fix bench_cascade program on Windows. (78da644 Joakim Soderberg)
+ o Don't segfault on no found event backend. (8f2af50 Joakim Soderberg)
+ o Only test the event backends available on the system. (7ea4159
+ Joakim Soderberg)
+ o Added a "make verify" target. (e053c4f Joakim Soderberg)
+ o Fix the make "verify" target on Windows. (67e5d74 Joakim Soderberg)
+ o Get rid of deprecation warnings for OpenSSL on OSX 10.7+ (69c3516
+ Joakim Söderberg)
+ o Fix kqueue support. (a831f2f Joakim Söderberg)
+ o Added a test for testing if kqueue works with pipes. (2799b35
+ Joakim Söderberg)
+ o Change the BSD license from 4 to 3-clause. (86df3ed Joakim Soderberg)
+ o Minimum required python version is 2.4. (968e97b Joakim Soderberg)
+ o Get rid of unknown pragma warnings. (0ef1d04 Joakim Soderberg)
+ o Add a "make verify_coverage" target generation coverage
+ info. (f2483f8 Joakim Soderberg)
+ o Fix the "make verify" target on NetBSD (4ac086a Joakim Soderberg)
+ o Only look for ZLib when it is used (if tests are
+ included). (f780593 Joakim Soderberg)
+ o Added EVENT__ENABLE_GCC_WARNINGS, turns all warnings into
+ errors. (dd413bd Joakim Soderberg)
+ o Add CMake config and install targets. (f3446ed Joakim Soderberg)
+ o Fix typo (4b754df Joakim Soderberg)
+ o Some work on making it possible to simply do add_subdirectory() on
+ the project. (49ab363 Joakim Soderberg)
+ o Set USE_DEBUG=1 on EVENT__ENABLE_VERBOSE_DEBUG (fd42e70 Joakim Soderberg)
+ o Fix so that old nmake project still builds. (24d6466 Joakim
+ Soderberg)
+ o Rename README to README.md and use markdown to format. (d2bc39a
+ Joakim Soderberg)
+ o Update README with CMake build instructions. (604b8cc Joakim Soderberg)
+ o Clean up the README some. (8d4cb35 JoakimSoderberg)
+ o Forgotten headers for old nmake project compatability. (8697b99
+ Joakim Soderberg)
+ o Change all uses of WIN32 to _WIN32 (4e14395 Joakim Söderberg)
+ o Fix include bug. (2024467 Joakim Söderberg)
+ o Check if we're on OSX before disabling deprecation in le-proxy
+ (8b40a5b Joakim Söderberg)
+ o Fix broken autotools build. (ae1bd82 Joakim Söderberg)
+ o Disclaimerize cmake a little in the README (d03b5bf)
+ o Fix CMake compile when OpenSSL is disabled. (e423d42 Joakim
+ Söderberg)
+ o CMake: Get rid of python not found warning when regress tests
+ turned off. (d38d798 Joakim Söderberg)
+ o Fix https-client compilation on Windows. (d7be788 Joakim Soderberg)
+ o Guard against EVENT_NOWIN32 being set during testing. (f1715b4
+ Joakim Soderberg)
+ o Check for OSX when checking for clang. (e212c54 Joakim Soderberg)
+ o Added a Travis-CI configuration file. (8c0f0a9 Joakim Soderberg)
+ o Added -Qunused-arguments for clang on macosx (ed99d92 Trond Norbye)
+ o Rename event_extras to event_extra (a0dd5df Trond Norbye)
+ o Add option to build shared library (4545fa9 Trond Norbye)
+ o Add -Qunused-arguments for clang on macos (b56611d Trond Norbye)
+ o Add cmake-related files to .gitignore (e061321 Trond Norbye)
+ o Export event_extra not event_extras. (2b41bcf Joakim Söderberg)
+
+ Bugfixes (core)
+ o If evsel->del() fails, don't leave the evmap in an inconsistent
+ state (9b5a527 Maxime Henrion)
+ o Move event_debug_note_teardown_ before mm_free. (69b5c64)
+ o Check CLOCK_MONOTONIC_* at runtime if needed. (911abf3)
+ o Fix reinit of fds with EV_WRITE but not EV_READ. (ebfd8a8 maksqwe)
+ o Tweaked callbacks to prevent race condition
+ (https://github.com/libevent/libevent/issues/104) (40830f1, 2ea15ed
+ John Ohl)
+ o Move assert(ev) to before we use ev in EV_CLOSURE_EVENT_FINALIZE
+ case (9805972)
+
+ Bugfixes (evhttp)
+ o Fix a double close() bug in evhttp when the underlying bufferevent uses
+ BEV_OPT_CLOSE_ON_FREE. (31db8a0 Maxime Henrion)
+ o Fix an unlikely but possible error case for http connections (f22049e)
+ o Avoid racy bufferevent activation (5eb1788 Nate Rosenblum)
+
+ Bugfixes on 2.0 (Windows)
+ o Use windows vsnprintf fixup logic on all windows environments (e826f19)
+ o libevent/win32_dealloc() : fix sizeof(pointer) vs sizeof(*pointer)
+ (b8f5980 Frank Denis)
+
+ Bugfixes (evutil_secure_rng)
+ o When we seed from /proc/sys/kernel/random/uuid, count it as success
+ (e35b540)
+ o We should return after arc4random_buf() (1ea1f26 Makoto Kato)
+ o Avoid other RNG initialization FS reads when urandom file is
+ specified (9695e9c)
+ o Really remove RNG seeds from the stack (f5ced88)
+ o Fix another arc4random_buf-related warning (e64a2b0)
+
+ Bugfixes (bufferevents)
+ o Initialize async bufferevent timeout CBs unconditionally (af9b2a7)
+
+ Bugfixes (evdns)
+ o Checking request nameserver for NULL, before using it. (5c710c0
+ Belobrov Andrey)
+ o Fix SEGFAULT after evdns_base_resume if no nameservers
+ installed. (14971a8 Azat Khuzhin)
+ o Actually use the log facility for reporting evdns problems. (e1766a1)
+ o Fix SEGFAULT after evdns_base_resume if no nameservers
+ installed. (f8d7df8 Azat Khuzhin)
+ o fix for ServFail from RIPE Atlas release (62f596b Antony Antony)
+
+ Bugfixes (compilation)
+ o Fix test compilation with nmake: add the gdi.lib dependency (5ba8ab7)
+ o Whoops. It is gdi.lib, not gdi32.lib. (github issue #61) (8ab612e)
+ o Don't use return since return type is void and build error occurs
+ using clang (838161d Makoto Kato)
+ o Use void casts to suppress some "unchecked return value" warns (7080d55)
+ o rpcgen: Generate regress.gen.[c,h] in build rather than src dir
+ (243386c Ross Lagerwall)
+ o Fix a compiler warning when checking for arc4random_buf linker
+ breakage. (5cb3865)
+ o Fix 'make distcheck' by adding regress.gen.[ch] to DISTCLEANFILES
+ (239d834)
+
+ o Fix a c90 warning (c207682)
+ o Fix consts in WIN32-Code/getopt*.[ch] (57abb35)
+
+ Bugfixes (locks, synchronization)
+ o Missed lock acquire/release in event_base_cancel_single_callback_()
+ (d3d999a Azat Khuzhin)
+ o Fix locking in bufferevent_get_options_(). (dbc9cd4 Maxime Henrion)
+
+ Bugfixes (leaks)
+ o Avoid leaking segment mappings when offset is not a page multiple (d409514)
+
+ Testing
+ o Add tests for evdns_base_resume(). (1cd9ff5 Azat Khuzhin)
+ o Fix dns/leak_resume_send_err test. (7e876df Azat Khuzhin)
+ o Add checks for evhttp_connection_get_server() in unit
+ tests. (fbc323b Maxime Henrion)
+ o Fix a (failure-only) null dereference in the unit tests (1104d0b)
+ o Fix a logic error in test_evbuffer_freeze (7765884)
+ o Add missing check to test_evbuffer_file_segment_add_cleanup_cb (eba4506)
+ o Fix some crash-on-fail cases in DNS regression tests (87cd6f0)
+ o DNS tests: add a missing check (f314900)
+ o Finalize tests: add a missing check (82b6956)
+ o test_evutil_rtrim: add another missing check. (e193c95)
+ o regress_main: logging all if env EVENT_DEBUG_LOGGING_ALL isset
+ (611e28b Azat Khuzhin)
+ o regress_http: add tests for evhttp_connection_get_addr() (4dd500c
+ Azat Khuzhin)
+ o Update to the latest version of tinytest (7a80476)
+ o Heap-allocate zlib data structure in regress_zlib tests (4947c18)
+
+ Performance tweaks (core)
+ o Avoid redundant syscall to make a nonblocking socket nonblocking
+ (42c03da Maxime Henrion)
+ o Avoid redundant syscall if making a socket cloexec twice (1f29b18)
+ o Avoid redundant invocations of init_extension_functions for IOCP (3b77d62)
+
+ Documentation
+ o Document that arc4random is not a great cryptographic PRNG. (6e49696)
+ o Small doxygen tweaks (6e67b51)
+ o Try another doxygen tweak (ccf432b)
+ o Clarify event_base_loop exit conditions (031a803)
+ o Fix a typo (be7bf2c Ondřej Kuzník)
+ o Document deferred eventcb behaviour (13a9a02 Ondřej Kuzník)
+ o Typo fixes from Linus Nordberg (cec62cb, 8cd695b)
+ o Fix duplicate paragraph in evbuffer_ptr documentation (58408ee)
+
+ Code Improvements (coverity)
+ o Fix a pile of coverity warnings in the unit tests (867f401)
+ o Fix coverity warnings in benchmark tools. (ff7f739)
+ o Whoops; fix compilation in bench.c (544cf88)
+ o Remove spurious checks in evrpc.c error cases (coverity) (991b362)
+ o Fix a couple of compilation warnings in regress_http.c (860767e)
+ o Fix even more coverity warnings. (d240328)
+ o Stop checking for inet_aton; we don't use it. (f665d5c)
+ o Add an include to evrpc-internal to fix openbsd compilation warning
+ (5e161c6)
+
+ Cleanups
+ o Remove an unreachable return statement in minheap-internal.h (e639a9e)
+ o Refactor evmap_{io,signal}_active_() to tolerate bad inputs (974c60e)
+ o Fix needless bufferevent includes in evdns.c (254c04e)
+ o Fix a couple of "#ifdef WIN32" instances (88ecda3)
+ o Remove unneeded declaration in bufferevent-internal.h (4c8ebcd)
+
+ Sample code
+ o le-proxy: Fail more gracefully if opening listener fails (44b2491)
+ o http-server: drop uri_root from base_url in http-server. (6171e1c Azat Khuzhin)
+ o https-client: POST supported, args supported (c5887f7 Alexey Ozeritsky)
+ o https-client: code cleanup (29af65e Alexey Ozeritsky)
+ o https-client: Small tweaks to https-client.c (90786eb)
+ o https-client: Set hostname for SNI extension (by f69m) (d1976f8)
+ o https-client: add a cast to https-client.c (462e6b6)
+
+
+
+Changes in version 2.1.3-alpha (1 May 2013)
+
+ Libevent 2.1.3-alpha fixes various bugs, adds new unit tests, and cleans
+ up the code in a couple of places. It has a new callback in evhttp for
+ reporting errors during a request, a new feature for allowing evdns to
+ not keep the event_base looping when there are no requests inflight, and
+ example code for writing an https client.
+
+ Libevent 2.1.3-alpha also has an important new (experimental) event
+ finalization feature to allow safe event teardown in multithreaded
+ programs. This ought to fix the longstanding bug with deadlocks in
+ multithreaded use of SSL-based bufferevents that some people have been
+ experiencing since Libevent 2.0.
+
+
+ Core (event finalization)
+ o Implement event_finalize() and related functions to avoid certain
+ deadlocks (8eedeab)
+ o Use finalization feature so bufferevents can avoid deadlocks (02fbf68)
+ o Always run pending finalizers when event_base_free() is called (e9ebef8)
+ o Remove bufferevent_del_generic_timeout_cbs as now unused (4ea4c6a)
+ o More documentation for finalization feature (a800b91)
+ o Make the event_finalize* functions return an error code (5d11f4f)
+ o Mark the finalize stuff as experiemental in case it needs to
+ change (23e2e29)
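+
+  For illustration only, a minimal sketch of the experimental finalize API
+  described above: rather than calling event_free() directly, a
+  multithreaded program can hand the event a finalizer that runs once no
+  other thread can still be inside its callback ("on_finalize" and the
+  heap-allocated callback argument are placeholders):
+
+      #include <event2/event.h>
+      #include <stdlib.h>
+
+      static void on_finalize(struct event *ev, void *arg)
+      {
+          /* Runs after Libevent guarantees the event callback is not active. */
+          free(arg);
+      }
+
+      static void teardown(struct event *ev)
+      {
+          /* First argument is reserved; pass 0.  Frees ev after on_finalize. */
+          event_free_finalize(0, ev, on_finalize);
+      }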
+
+ Evdns
+ o evdns: New flag to make evdns not prevent the event loop from
+ exiting (6b7fa62 Azat Khuzhin)
+
+ Bugfixes (Core)
+ o Make event_remove_timer behave correctly with persistent timers (5623e80)
+ o Unit test for event_remove_timer with EV_PERSIST. (96150dd)
+ o Double-check next timeout when adding events (9443868 Nate Rosenblum)
+ o event_base_update_cache_time should be a no-op if the loop isn't
+ running (5e6fa2a)
+
+ Bugfixes (evhttp, crash fix, from 2.0)
+ o fix #73 and fix http_connection_fail_test to catch it (b618204 Greg Hazel)
+
+ Bugfixes (compilation and portability, from 2.0)
+ o Fix compilation with WIN32_HAVE_CONDITION_VARIABLES enabled (7e45739)
+ o Fix missing AC_PROG_SED on older Autoconfs (9ab2b3f Tay Ray Chuan)
+ o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5)
+ (74d4c44 Kevin Bowling)
+ o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake
+ 1.13 compat (817ea36)
+ o Rename configure.in to configure.ac to appease newer autoconfs (0c79787)
+ o Avoid using top_srcdir in TESTS: new automakes do not like this (a55514e)
+
+ Bugfixes (resource leaks/lock errors on error, from 2.0)
+ o Avoid leaking fds on evconnlistener with no callback set (69db261)
+ o Avoid double-close on getsockname error in evutil_ersatz_socketpair
+ (0a822a6)
+ o Fix a locking error in bufferevent_socket_get_dns_error. (0a5eb2e)
+
+ Documentation Fixes (from 2.0)
+ o Fix a mistake in evbuffer_remove() arguments in example http server code
+ (c322c20 Gyepi Sam)
+ o Fix a typo in a comment in buffer.h. Spotted by Alt_F4 (773b0a5)
+
+ Documentation Fixes
+ o minor documentation typos (809586a Patrick Pelletier)
+ o Fix cut-and-paste err in whatsnew-2.1 (49905ac)
+ o Fix comment to refer to sample/include.am correctly (9e8cdf3 Sebastian
+ Hahn)
+ o Fix typo : Dispatching instead of Dispaching (0c2bacc Volker Lendecke)
+ o fix some hinky indentation in evhttp_make_request (80e220e Patrick
+ Pelletier)
+ o "buffer" spelling (a452811 Patrick Pelletier)
+ o Specify return behavior in header for evbuffer_pullup() in corner case
+ (cf8d1cd Dan Petro)
+ o Clarify an important point about event_base_foreach_event() (920a5e6)
+
+ Compilation Fixes/Tool Support
+ o avoid valgrind false positive by zeroing epoll_event (1258614 Patrick
+ Pelletier)
+ o Fix harmless clang enum warning (b452a43 Sebastian Hahn)
+ o remove all exes on "make clean", not just regress.exe (974bfa0 Patrick
+ Pelletier)
+ o Make --disable-libevent-regress work again (787fd74)
+ o Do not build strlcpy.c when it will have no code. (4914620)
+
+ Portability Fixes
+ o When EWOULDBLOCK is not EAGAIN, treat it as equivalent to it (bf7a0ff)
+ o Preliminary changes for Minix3. (0dda56a Nicholas Heath)
+ o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake 1.13
+ compat (bf278b)
+ o Avoid using $(top_srcdir) in TESTS. (2863c83)
+ o build test/test-script.sh on systems with a less-featureful $< (f935e21)
+ o Implement EVUTIL_ERR_IS_EAGAIN on windows. (42aaf4d)
+
+ Evhttp changes:
+ o Fix ipv6 support for http. When URL contain domain, not IP
+ address. (71e709c Azat Khuzhin)
+ o uri decode: fix for warning "use of uninitialised value" (64b6ece Azat
+ Khuzhin)
+ o uri decode: changed the test for the existence of the next character
+ (e1903e3 Azat Khuzhin)
+ o Move prototype of evhttp_decode_uri_internal() to http-internal.h
+ (de8101a Azat Khuzhin)
+ o Test: decoding just part of string with evhttp_decode_uri_internal()
+ (1367653 Azat Khuzhin)
+ o Add new error_cb for actual reporting of HTTP request errors. (7b07719
+ Azat Khuzhin)
+ o Add test for EVREQ_HTTP_REQUEST_CANCEL into http_cancel_test() (862c217
+ Azat Khuzhin)
+ o Drop extra header http_struct.h from regress_http.c (54cc800 Azat Khuzhin)
+
+ Testing
+ o Add regress test ipv6_for_domain. (9ec88bd Azat Khuzhin)
+ o Add an environment variable (EVENT_DEBUG_MODE) to run unit tests in debug
+ mode (2fad0f3)
+ o Add a test with an active_later event at event_base_free time. (1c3147f)
+ o Make all tests pass under EVENT_DEBUG_MODE=1 (b1b054f)
+ o Add some verbose notes to bufferevent unit tests (9d893c9)
+ o New test for active_later->active transition on event_active (a153874)
+ o New tests for event_base_foreach_event() (0b096ef)
+ o Unit tests for event_base_gettimeofday_cached() and
+ event_base_update_cache_time() (30ea291)
+ o A test for event_get_assignment() (f09629e)
+ o More unit tests for initializing common timeouts. (d596739)
+ o Fix a bug in the new main/event_foreach test (702c9aa)
+
+ Windows:
+ o use FormatMessage for winsock errors (0c6ec5d, 2078e9b, 4ccdd53, c9ad3af
+ Patrick Pelletier)
+ o a program to print out the error strings for winsock errors (7296512
+ Patrick Pelletier)
+ o Fix a warning introduced in 0c6ec5d8 (eeb700c)
+ o Fix another warning introduced in 0c6ec5d8 (ed26561)
+
+ Examples (http)
+ o Add sample/https-client.c, an example of stacking evhttp as a client on
+ top of bufferevent_ssl. (be46c99 Catalin Patulea)
+ o use ${OPENSSL_LIBS} instead of -lssl -lcrypto (bf31fa5 Patrick Pelletier)
+ o https-client was putting newlines at 256-byte boundaries (42d7441 Patrick
+ Pelletier)
+ o better handling of OpenSSL errors (5754d96 Patrick Pelletier)
+ o use Debian's default root certificate location (aacd674 Patrick Pelletier)
+ o use iSECPartners code to validate hostname in certificate (64d9f16
+ Patrick Pelletier)
+ o avoid sign mismatch warning in openssl_hostname_validation.c (6021cb5
+ Patrick Pelletier)
+ o pull in wildcard matching code from cURL (4db9da6 Patrick Pelletier)
+ o Another tweak to https-client.c (95acdaa)
+ o Remove http_struct.h usage in sample/https-client.c (8a90a85)
+
+
+
+Changes in version 2.1.2-alpha (18 Nov 2012)
+
+ Libevent 2.1.2-alpha includes more portable support for monotonic timers,
+ refactors much of Libevent's internal and external infrastructure,
+ closes some longstanding gaps in the interface, and makes other
+ improvements. The log below tries to organize features by rough area of
+ effect. It omits a few commits which were pure bugfixes on other commits
+ listed below. For more detail, see the git changelogs. For more
+ insight, see the "whatsnew-2.1.txt" document included in the Libevent
+ 2.1.2-alpha distribution.
+
+ Libevent 2.1.2-alpha also includes all changes made in 2.0.19-stable
+ through 2.0.21-stable inclusive.
+
+ Performance (core):
+ o Replace pipe-based notification with EVFILT_USER where possible. This
+ should make multithreaded programs on OSX and *BSD alert the main thread a
+ little faster. (53a07fe)
+ o Make th_base_lock nonrecursive. (9cd5acb)
+
+ New/Changed API Functions:
+ o New event_get_priority() function to return an event's priority (f90e255)
+ o Add a bufferevent_get_priority() function (bd39554)
+ o Add an event_base_loopcontinue() to tell Libevent to rescan for more
+ events right away (7d6aa5e)
+ o Add a new callback to get called on evbuffer_file_segment free
+ (e9f8feb yangacer, 64051b9)
+ o Expose event_base_foreach_event() as a public API. (84fd6d7 Roman
+ Puls, 232055e, ffe1643)
+ o Add an event_remove_timer() to remove timer on an event without
+ deleting it (e3b2e08)
+ o Make bufferevent_set_timeouts(bev, NULL, NULL) have plausible
+ semantics (9dee36b)
+ o Rename event_enable_lock_debuging() to ..._debugging(). (The old name
+ should still work.) (07e132e)
+ o Add missing implementation for event_enable_debug_logging (3b3e21d)
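+
+  For illustration only, a minimal sketch combining two of the calls listed
+  above: dropping just the timeout from an event and asking a running loop
+  to rescan right away ("base" and "ev" are placeholders):
+
+      #include <event2/event.h>
+
+      static void drop_timer_and_rescan(struct event_base *base,
+                                        struct event *ev)
+      {
+          /* Remove ev's pending timeout without deleting the event. */
+          event_remove_timer(ev);
+          /* Ask the loop to scan for newly added/active events immediately. */
+          event_base_loopcontinue(base);
+      }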
+
+ PORTABLE MONOTONIC TIMERS:
+
+ Libevent 2.1.2 includes internal support for monotonic timers on
+ (nearly) all supported platforms, including Windows and OSX. Libevent
+ applications should now be more resilient to jumps forwards or backwards
+ in the system clock. Also, on Linux systems with epoll, we now
+ optionally support microsecond-level timeouts (whereas epoll only
+ supports millisecond-precision timeouts).
+
+ o Use mach_absolute_time() for monotonic clock support on OSX. (b8fd6f9)
+ o Do not track use_monotonic field when is no monotonic clock (cb653a0)
+ o EVENT_BASE_FLAG_PRECISE_TIMER indicates we want fine timer precision
+ (ddd69d3)
+ o On Linux, use CLOCK_MONOTONIC_COARSE by default (55780a7)
+ o Implement a GetTickCount-based monotonic timer for Windows (d5e1d5a)
+ o Refactor monotonic timer handling into a new type and set of
+ functions; add a gettimeofday-based ratcheting implementation (f5e4eb0)
+ o Add EVENT_PRECISE_TIMER environment var for selecting precise-but-slow
+ timer (a2598ec)
+ o Implement fast/precise monotonic clocks on Windows (2c47045)
+ o Simple unit tests for monotonic timers (630f077)
+ o Improve the monotonic-time unit test: make it check the step size (7428c78)
+ o When PRECISE_TIMERS is set with epoll, use timerfd for microsecond
+ precision (26c7582)
+ o Split out time-related evutil functions into a new evutil_time.c (c419485)
+ o Split out time-related prototypes into time-internal.h (71bca50)
+ o Add evutil_time.obj to Makefile.nmake (0ba0683)
+ o Avoid giving a spurious warning when timerfd support is unavailable
+ (1aaf9f0 Dave Hart)
+ o Make test_evutil_monotonic a little more tolerant (def3b83)
+ o Avoid unused-var warning on systems with clock_gettime but without
+ CLOCK_MONOTONIC_COARSE (9be5468)
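+
+  For illustration only, a minimal sketch of opting in to the finer timer
+  precision described above (a hint; backends without a precise timer fall
+  back to their default behavior):
+
+      #include <event2/event.h>
+      #include <stddef.h>
+
+      static struct event_base *make_precise_base(void)
+      {
+          struct event_config *cfg = event_config_new();
+          struct event_base *base;
+          if (cfg == NULL)
+              return NULL;
+          /* Prefer the slower but more precise timer, e.g. timerfd on Linux. */
+          event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
+          base = event_base_new_with_config(cfg);
+          event_config_free(cfg);
+          return base;
+      }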
+
+EVENT_BASE_ONCE LEAKS:
+ If a callback added by event_base_once() is never invoked, Libevent no
+ longer leaks internal memory.
+
+ o Free dangling event_once objects on event_base_free() (c17dd59)
+ o Add a unit test in which an event is created with event_base_once()
+ but never fires (4343edf)
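+
+  For illustration only, a minimal sketch of the call in question; Libevent
+  owns the internal event and, with this fix, reclaims it at
+  event_base_free() even if the timeout never fires ("on_timeout" is a
+  placeholder):
+
+      #include <event2/event.h>
+      #include <sys/time.h>   /* struct timeval */
+
+      static void on_timeout(evutil_socket_t fd, short what, void *arg)
+      {
+          /* One-shot: runs at most once. */
+      }
+
+      static void schedule_once(struct event_base *base)
+      {
+          struct timeval tv = { 5, 0 };
+          event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &tv);
+      }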
+
+TESTING SUPPORT, FIXES AND IMPROVEMENTS:
+
+ Libevent now disables by default its unit tests that would touch the
+ network, or that tend to fail on heavily-loaded systems. To re-enable
+ them, invoke the ./test/regress program with the @all alias.
+
+ o Simplify test.sh code significantly. (9b856fd Ross Lagerwall)
+ o Make all tests that hit the network disabled by default (f2cea87)
+ o Avoid a resource leak on error in http client benchmark (ea92fba)
+ o Update to latest tinytest (911b4f0349377) (ef7c4f7)
+ o Avoid (unlikely) overflow in bench_httpclient.c (5671033)
+ o Shave 700 msec off the persistent_timeout_jump test (21205b8)
+ o Check return value of write() in regress.c (c8009d2)
+ o Make load-dependent monotonic timer tests off-by-default (2b6fe8b)
+ o Add deferred_cb_skew to list of timing-dependent tests (34c8f31)
+ o Avoid test -e; older shs don't have one. (f1bd938)
+ o Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086)
+ o Fix a couple of compile warnings in the unit tests (5a9a014)
+
+MISC:
+ o Change evutil_weakrand_() to avoid platform random() (e86af4b Nicholas
+ Marriott, 3aa4415)
+
+INFRASTRUCTURE (Active-later events):
+ As a simplification and optimization to Libevent's "deferred callback"
+ logic (introduced in 2.0 to avoid callback recursion), Libevent now
+ treats all of its deferrable callback types using the same logic it uses
+ for active events. Now deferred events no longer cause priority
+ inversion, no longer require special code to cancel them, and so on.
+
+ o Refactor the callback part of an event into its own event_callback
+ type (cba59e5)
+ o Add "active later" event_callbacks to supersede deferred (745a63d)
+ o event_base_assert_ok: check value of event_active_count for
+ correctness (fec8bae)
+ o Replace deferred_cbs with event_callback-based implementation. (ae2b84b)
+ o Replace more deferred_cb names with event_callback (a4079aa)
+ o Give event_base_process_active a single exit path (581b5be)
+ o Restore our priority-inversion-prevention code with deferreds (c0e425a)
+ o Refactor event_persist_closure: raise and extract some common logic
+ (bec22b4)
+ o Remove the unused bits from EVLIST_ALL (9889a3d)
+
+
+Changes in version 2.0.22-stable (?? Dec 2013)
+
+ (As of 3b77d62829c4393bda6f9105a5d3b73b48a64b71.)
+
+BUGFIXES (evhttp)
+ o fix #73 and fix http_connection_fail_test to catch it (crash fix) (b618204 Greg Hazel)
+ o Avoid racy bufferevent activation (5eb1788 Nate Rosenblum)
+
+BUGFIXES (compilation and portability)
+ o Fix compilation with WIN32_HAVE_CONDITION_VARIABLES enabled (7e45739)
+ o Fix missing AC_PROG_SED on older Autoconfs (9ab2b3f Tay Ray Chuan)
+ o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5) (74d4c44 Kevin Bowling)
+ o Use AC_CONFIG_HEADERS in place of AM_CONFIG_HEADERS for autmake 1.13 compat (817ea36)
+ o Rename configure.in to configure.ac to appease newer autoconfs (0c79787)
+ o Avoid using top_srcdir in TESTS: new automakes do not like this (a55514e)
+ o Use windows vsnprintf fixup logic on all windows environments (e826f19)
+ o Fix a compiler warning when checking for arc4random_buf linker breakage. (5cb3865)
+ o Fix another arc4random_buf-related warning (e64a2b0)
+
+BUGFIXES (resource leaks/lock errors on error)
+ o Avoid leaking fds on evconnlistener with no callback set (69db261)
+ o Avoid double-close on getsockname error in evutil_ersatz_socketpair (0a822a6)
+ o Fix a locking error in bufferevent_socket_get_dns_error. (0a5eb2e)
+ o libevent/win32_dealloc() : fix sizeof(pointer) vs sizeof(*pointer) (b8f5980 Frank Denis)
+
+BUGFIXES (miscellaneous)
+ o Avoid other RNG initialization FS reads when urandom file is specified (9695e9c, bb52471)
+ o Avoid redundant invocations of init_extension_functions for IOCP (3b77d62)
+
+BUGFIXES (evdns)
+ o Checking request nameserver for NULL, before using it. (5c710c0 Belobrov Andrey)
+ o Fix SEGFAULT after evdns_base_resume if no nameservers installed. (f8d7df8 Azat Khuzhin)
+
+BUGFIXES (evutil_secure_random)
+ o When we seed from /proc/sys/kernel/random/uuid, count it as success (e35b540)
+ o Document that arc4random is not a great cryptographic PRNG. (6e49696)
+ o Add evutil_secure_rng_set_urandom_device_file (2bbb5d7)
+ o Really remove RNG seeds from the stack (f5ced88)
+
+
+DOCUMENTATION FIXES
+ o Fix a mistake in evbuffer_remove() arguments in example http server code (c322c20 Gyepi Sam)
+ o Fix a typo in a comment in buffer.h. Spotted by Alt_F4 (773b0a5)
+
+
+
+Changes in version 2.0.21-stable (18 Nov 2012)
+BUGFIXES:
+ o ssl: Don't discard SSL read event when timeout and read come close together (576b29f)
+ o ssl: Stop looping in "consider_reading" if reading is suspended. (f719b8a Joachim Bauch)
+ o ssl: No need to reserve space if reading is suspended. (1acf2eb Joachim Bauch)
+ o dns: Avoid a memory-leak on OOM in evdns. (73e85dd, f2bff75 George Danchev)
+ o build: Use python2 rather than python (0eb0109 Ross Lagerwall)
+ o build: Compile without warnings on mingw64 (94866c2)
+ o build: Fix compilation on mingw64 with -DUSE_DEBUG (62bd2c4)
+ o build: Make rpcgen_wrapper.sh work on systems without a "python2" binary (f3009e4)
+ o iocp: Close IOCP listener socket on free when LEV_OPT_CLOSE_ON_FREE is set (cb853ea Juan Pablo Fernandez)
+ o core: Avoid crash when event_pending() called with no event_base set on event (e3cccf3)
+ o misc: remove stray 'x' so print_err will compile when uncommented (ac35650 Patrick Pelletier)
+ o tests: Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086)
+ o tests: Warn when openssl version in unit test mismatches compiled version. (ac009f9)
+
+
+Changes in version 2.0.20-stable (23 Aug 2012)
+BUGFIXES:
+ o core: Make event_pending() threadsafe. (be7a95c Simon Liu)
+ o win32: avoid crash when waiting forever on zero fds. (160e58b)
+ o evhttp: Fix a memory leak on error in evhttp_uriencode (11c8b31)
+ o evbuffer: Avoid possible needless call to writev. Found by coverity. (6a4ec5c)
+ o evdns: memset sockaddr_in before using it. Found by coverity. (a1a0e67)
+ o evhttp: Check more setsockopt return values when binding sockets. Found by coverity (a0912e3)
+ o evdns: Avoid segfault on weird timeout during name lookup. (dc32077 Greg Hazel)
+ o bufferevent_ssl: Correctly invoke callbacks when a SSL bufferevent reads some and then blocks. (606ac43)
+
+
+PORTABILITY FIXES:
+ o check for arc4random_buf at runtime, on OS X (bff5f94 Greg Hazel)
+ o Correctly check for arc4random_buf (fcec3e8 Sebastian Hahn)
+ o Add explicit AC_PROG_SED to configure.in so all autoconfs will expose $(SED) (ca80ea6)
+
+BUILD FIXES:
+ o Add GCC annotations so that the vsprintf functions get checked properly (117e327)
+ o Fix an unused variable warning on *BSD. (c0720c1)
+
+UNIT TEST FIXES:
+ o Fix a couple of memory leaks (found with Valgrind). (3b2529a Ross Lagerwall)
+ o Remove deadcode in http regression tests. Found by coverity. (5553346)
+ o Fix possible uninitialized read in dns regression tests. Found by coverity. (2259777)
+ o Set umask before calling mkstemp in unit tests. Found by coverity (f1ce15d)
+ o Fix various check-after-dereference issues in unit tests: found by coverity (4f3732d)
+ o Fix resource leaks in the unit tests; found by coverity (270f279)
+ o Add some missing null checks to unit tests; found by coverity (f021c3d)
+ o Avoid more crashes/bad calls in unit tests; found by coverity (3cde5bf)
+ o Remove unused variable; spotted by coverity (6355b2a)
+ o Add checks to various return values in unit tests. Found by coverity (b9e7329)
+ o Move assignment outside tt_assert in ssl unit tests. Appeases coverity. (a2006c0)
+
+
+
+Changes in version 2.0.19-stable (3 May 2012)
+BUGFIXES (CORE):
+ o Refactor event_persist_closure: raise and extract some common logic (bec22b4)
+ o If time has jumped so we'd reschedule a periodic event in the past, schedule it for the future instead (dfd808c)
+ o If a higher-priority event becomes active, don't continue running events of the current priority. (2bfda40)
+
+BUGFIXES (SSL):
+ o Fixed potential double-readcb execution with openssl bufferevents. (4e62cd1 Mark Ellzey)
+
+BUGFIXES (DNS):
+ o Cancel a probe request when the server is freed, and ignore cancelled probe callbacks (94d2336 Greg Hazel)
+ o Remove redundant DNS_ERR_CANCEL check, move comment (46b8060 Greg Hazel)
+ o When retransmitting a timed-out DNS request, pick a fresh nameserver. (3d9e52a)
+
+DOCUMENTATION FIXES:
+ o Fix a typo in the bufferevent documentation (98e9119)
+ o Add missing ) to changelog; spotted by rransom (4c7ee6b)
+ o Fix the website URL in the readme (f775521)
+
+COMPILATION FIXES:
+ o Fix a compilation error with MSVC 2005 due to use of mode_t (336dcae)
+ o Configure with gcc older than 2.95 (4a6fd43 Sebastian Hahn)
+ o Generate event-config.h with a single sed script (30b6f88 Zack Weinberg)
+
+FORWARD-COMPATIBILITY:
+ o Backport: provide EVENT_LOG_* names, and deprecate _EVENT_LOG_* (d1a03b2)
+
+TESTING/DEBUGGING SUPPORT:
+ o dns-example.c can now take a resolv.conf file on the commandline (6610fa5)
+ o Make some evdns.c debug logs more verbose (d873d67)
+ o Work-around a stupid gcov-breaking bug in OSX 10.6 (b3887cd)
+
+
+
+Changes in version 2.0.18-stable (22 Mar 2012)
+BUGFIXES (core):
+ o Make uses of open() close-on-exec safe by introducing an internal evutil_open_closeonexec. (d2b5f72 Ross Lagerwall, 03dce42)
+
+BUGFIXES (kqueue):
+ o Properly zero the kevent in kq_setup_kevent() (c2c7b39 Sebastian Hahn)
+
+BUILD FIXES:
+ o Added OPENSSL_LDFLAGS env variable which is appended to SSL checks. (9278196 Mark Ellzey)
+ o Changed OPENSSL_LDFLAGS to OPENSSL_LIBADD (2d67b63 Mark Ellzey)
+ o Don't do clang version detection when disabling some flags (083296b Sebastian Hahn)
+
+BUGFIXES (dns):
+ o Stop crashing in evdns when nameserver probes give a weird error (bec5068)
+
+
+Changes in version 2.0.17-stable (10 Feb 2012)
+
+BUGFIXES (core):
+ o Be absolutely sure to clear pncalls before leaving event_signal_closure (11f36a5)
+ o check for sysctl before we use it (358c745 Mike Frysinger)
+ o Remove bogus casts of socket to int before calling ev_callback (f032516)
+ o Make evconnlistener work around bug in older Linux when getting nmapped (ecfc720)
+ o Fix a list corruption bug when using event_reinit() with signals present (6e41cdc)
+ o Fix a fd leak in event_reinit() (3f18ad1)
+ o Do a memberwise comparison of threading function tables (c94a5f2 Nate R)
+ o Use C-style comments in C source files (for compatibility with compilers such as xlc on AIX). (d84d917 Greg Hewgill)
+ o Avoid crash when freeing event_iocp and using event_set_mem_functions (19715a6)
+ o In the kqueue backend, do not report EBADF as an EV_READ (5d7bfa1 Nicholas Marriott)
+
+BUGFIXES (evbuffer and bufferevents):
+ o Fix behavior of evbuffer_peek(buf,-1,NULL,NULL,0) (c986f23 Zack Weinberg)
+ o Loop on filtering SSL reads until we are blocked or exhausted. (5b4b812)
+
+BUGFIXES (evhttp):
+ o Force strict validation of HTTP version in response. (790f6b3 Catalin Patulea)
+
+BUGFIXES (evdns):
+ o evdns: fix a bug in circular-queue implementation (d6094b1)
+
+BUILD FIXES:
+ o Fix a silly compilation error with the sun compiler (1927776 Colin Watt)
+ o Suppress a gcc warning from ignoring fwrite return in http-sample.c (7206e8c)
+
+DOCUMENTATION FIXES:
+ o Slightly clarify evbuffer_peek documentation (7bbf6ca)
+ o Update copyright notices to 2012 (e49e289)
+
+NEW APIS:
+ o Backport evhttp_connection_get_bufferevent to Libevent 2.0 (da70fa7 Arno Bakker)
+
+TESTS AND TEST FIXES:
+ o Fix a race condition in the dns/bufferevent_connect_hostname test. (cba48c7)
+ o Add function to check referential integrity of an event_base (27737d5)
+ o Check event_base correctness at end of each unit test (3312b02)
+ o Workaround in the unit tests for an apparent epoll bug in Linux 3.2 (dab9187)
+ o Better workaround for Linux 3.2 edge-triggered epoll bug (9f9e259)
+
+Changes in version 2.0.16-stable (18 Nov 2011)
+BUGFIXES (core):
+ o More detailed message in case of libevent self-debugging failure. (9e6a4ef Leonid Evdokimov)
+ o epoll: close fd on alloc fail at initialization (1aee718 Jamie Iles)
+ o Fix compile warning from saying event2/*.h inside a comment (447b0ba)
+ o Warn when unable to construct base because of failing make_base_notifiable (4e797f3)
+ o Don't try to make notifiable event_base when no threading fns are configured (e787413)
+
+BUGFIXES (evbuffer):
+ o unit test for remove_buffer bug (90bd620 Greg Hazel)
+ o Fix an evbuffer crash in evbuffer_remove_buffer() (c37069c)
+
+BUGFIXES (bufferevent_openssl):
+ o Refactor amount-to-read calculations in buffervent_ssl consider_reading() (a186e73 Mark Ellzey)
+ o Move SSL rate-limit enforcement into bytes_to_read() (96c562f)
+ o Avoid spinning on OpenSSL reads (2aa036f Mark Ellzey)
+
+BUGFIXES (dns)
+ o Empty DNS reply with OK status is another way to say NODATA. (21a08d6 Leonid Evdokimov)
+
+TESTING:
+ o Tests for 94fba5b and f72e8f6 (d58c15e Leonid Evdokimov)
+ o Test for commit aff6ba1 (f7841bf Leonid Evdokimov)
+ o Style and comment tweaks for dns/leak* tests (5e42202)
+ o improve test to remove at least one buffer from src (7eb52eb Greg Hazel)
+
+DOCUMENTATION:
+ o Add note about evhttp_send_reply_end to its doxygen (724bfb5)
+ o Update copyright dates to 2011. (3c824bd)
+ o Fix typo in whatsnew-2.0.txt (674bc6a Mansour Moufid)
+ o Improve win32 behavior of dns-sample.c code (a3f320e Gisle Vanem)
+
+
+
+Changes in version 2.0.15-stable (12 Oct 2011)
+BUGFIXES (DNS):
+ o DNS: add ttl for negative answers using RFC 2308 idea. (f72e8f6 Leonid Evdokimov)
+ o Add DNS_ERR_NODATA error code to handle empty replies. (94fba5b Leonid Evdokimov)
+
+BUGFIXES (bufferevents and evbuffers):
+ o Make evbuffer callbacks get the right n_added value after evbuffer_add (1ef1f68 Alex)
+ o Prefer mmap to sendfile unless a DRAINS_TO_FD flag is set. Allows add_file to work with SSL. (0ba0af9)
+
+BUGFIXES (event loop):
+ o When a signal callback is activated to run multiple times, allow event_base_loopbreak to work even before they all have run. (4e8eb6a)
+
+DOCUMENTATION FIXES:
+ o Fix docstring in dns.h (2b6eae5 Leonid Evdokimov)
+ o refer to non-deprecated evdns functions in comments (ba5c27d Greg Hazel)
+
+BUILD AND TESTING FIXES:
+ o le-proxy and regress depend on openssl directly (9ae061a Sergey Avseyev)
+ o Use _SOURCES, not _sources, in sample/Makefile.am (7f82382)
+ o Fixed compiler warnings for unchecked read/write calls. (c3b62fd Mark Ellzey)
+ o Make write-checking fixes use tt_fail_perror (2b76847)
+ o Fix some "value never used" warnings with gcc 4.6.1 (39c0cf7)
+
+
+
+Changes in version 2.0.14-stable (31 Aug 2011)
+BUGFIXES (bufferevents and evbuffers):
+ o Propagate errors on the underlying bufferevent to the user. (4a34394 Joachim Bauch)
+ o Ignore OpenSSL deprecation warnings on OS X (5d1b255 Sebastian Hahn)
+ o Fix handling of group rate limits under 64 bytes of burst (6d5440e)
+ o Solaris sendfile: correctly detect amount of data sent (643922e Michael Herf)
+ o Make rate limiting work with common_timeout logic (5b18f13)
+ o clear read watermark on underlying bufferevent when creating filtering bev to fix potentially failing fragmented ssl handshakes (54f7e61 Joachim Bauch)
+
+BUGFIXES (IOCP):
+ o IOCP: don't launch reads or writes on an unconnected socket (495c227)
+ o Make IOCP rate-limiting group support stricter and less surprising. (a98da7b)
+ o Have test-ratelim.c support IOCP (0ff2c5a)
+ o Make overlapped reads result in evbuffer callbacks getting invoked (6acfbdd)
+ o Correctly terminate IO on an async bufferevent on bufferevent_free (e6af35d)
+
+BUGFIXES (other):
+ o Fix evsig_dealloc memory leak with debugging turned on. (9b724b2 Leonid Evdokimov)
+ o Fix request_finished memory leak with debugging turned on. (aff6ba1 Leonid Evdokimov)
+
+BUILD AND TESTING FIXES:
+ o Allow OS-neutral builds for platforms where some versions have arc4random_buf (b442302 Mitchell Livingston)
+ o Try to fix 'make distcheck' errors when building out-of-tree (04656ea Dave Hart)
+ o Clean up some problems identified by Coverity. (7c11e51 Harlan Stenn)
+
+
+Changes in version 2.0.13-stable (18 Jul 2011)
+BUGFIXES
+ o Avoid race-condition when initializing global locks (b683cae)
+ o Fix bug in SSL bufferevents backed by a bev with a write high-watermarks (e050703 Joachim Bauch)
+ o Speed up invoke_callbacks on evbuffers when there are no callbacks (f87f568 Mark Ellzey)
+ o Avoid a segfault when all methods are disabled or broken (27ce38b)
+ o Fix incorrect results from evbuffer_search_eol(EOL_LF) (4461f1a)
+ o Add some missing checks for mm_calloc failures (89d5e09)
+ o Replace an assertion for event_base_free(NULL) with a check-and-warn (09fe97d)
+ o Report kqueue ebadf, epipe, and eperm as EV_READ events (1fd34ab)
+ o Check if the `evhttp_new_object' function in `http.c' returns NULL. (446cc7a Mansour Moufid)
+ o Use the correct printf args when formatting size_t (3203f88)
+ o Complain if the caller tries to change threading cbs after setting them (cb6ecee)
+
+DOCUMENTATION FIXES AND IMPROVEMENTS
+ o Revise the event/evbuffer/bufferevent doxygen for clarity and accuracy (2888fac)
+ o Update Doxyfile to produce more useful output (aea0555)
+
+TEST FIXES
+ o Fix up test_evutil_snprintf (caf695a)
+ o Fix tinytest invocation from windows shell (57def34 Ed Day)
+
+BUILD FIXES
+ o Use AM_CPPFLAGS in sample/Makefile.am, not AM_CFLAGS (4a5c82d)
+ o Fix select.c compilation on systems with no NFDBITS (49d1136)
+ o Fix a few warnings on OpenBSD (8ee9f9c Nicholas Marriott)
+ o Don't break when building tests from git without python installed (b031adf)
+ o Don't install event_rpcgen.py when --disable-libevent-install is used (e23cda3 Harlan Stenn)
+ o Fix AIX build issue with TAILQ_FOREACH definition (e934096)
+
+
+Changes in version 2.0.12-stable (4 Jun 2011)
+BUGFIXES
+ o Fix a warn-and-fail bug in kqueue by providing kevent() room to report errors (28317a0)
+ o Fix an assert-inducing fencepost bug in the select backend (d90149d)
+ o Fix failing http assertion introducd in commit 0d6622e (0848814 Kevin Ko)
+ o Fix a bug that prevented us from configuring IPv6 nameservers. (74760f1)
+ o Prevent size_t overflow in evhttp_htmlescape. (06c51cd Mansour Moufid)
+ o Added several checks for under/overflow conditions in evhttp_handle_chunked_read (a279272 Mark Ellzey)
+ o Added overflow checks in evhttp_read_body and evhttp_get_body (84560fc Mark Ellzey)
+
+DOCUMENTATION:
+ o Add missing words to EVLOOP_NONBLOCK documentation (9556a7d)
+
+BUILD FIXES
+ o libssl depends on libcrypto, not the other way around. (274dd03 Peter Rosin)
+ o Libtool brings in the dependencies of libevent_openssl.la automatically (7b819f2 Peter Rosin)
+ o Use OPENSSL_LIBS in Makefile.am (292092e Sebastian Hahn)
+ o Move the win32 detection in configure.in (ceb03b9 Sebastian Hahn)
+ o Correctly detect openssl on windows (6619385 Sebastian Hahn)
+ o Fix a compile warning with zlib 1.2.4 and 1.2.5 (5786b91 Sebastian Hahn)
+ o Fix compilation with GCC 2, which had no __builtin_expect (09d39a1 Dave Hart)
+ o Fix new warnings from GCC 4.6 (06a714f)
+ o Link with -lshell32 and -ladvapi32 on Win32. (86090ee Peter Rosin)
+ o Make the tests build when OpenSSL is not available. (07c41be Peter Rosin)
+ o Bring in the compile script from automake, if needed. (f3c7a4c Peter Rosin)
+ o MSVC does not provide S_ISDIR, so provide it manually. (70be7d1 Peter Rosin)
+ o unistd.h and sys/time.h might not exist. (fe93022 Peter Rosin)
+ o Make sure TINYTEST_LOCAL is defined when building tinytest.c (8fa030c Peter Rosin)
+ o Fix winsock2.h #include issues with MSVC (3d768dc Peter Rosin)
+ o Use evutil_gettimeofday instead of relying on the system gettimeofday. (0de87fe Peter Rosin)
+ o Always use evutil_snprintf, even if OS provides it (d1b2d11 Sebastian Hahn)
+ o InitializeCriticalSectionAndSpinCount requires _WIN32_WINNT >= 0x0403. (816115a Peter Rosin)
+ o cygwin: make it possible to build DLLs (d54d3fc)
+
+
+
+Changes in version 2.0.11-stable (27 Apr 2011)
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+BUGFIXES:
+ o Fix evport handling of POLLHUP and POLLERR (b42ce4b)
+ o Fix compilation on Windows with NDEBUG (cb8059d)
+ o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (0144886 Trond Norbye)
+ o Detect and handle more allocation failures. (666b096 Jardel Weyrich)
+ o Use event_err() only if the failure is truly unrecoverable. (3f8d22a Jardel Weyrich)
+ o Handle resize failures in the select backend better. (83e805a)
+ o Correctly free selectop fields when select_resize fails in select_init (0c0ec0b)
+ o Make --enable-gcc-warnings a no-op if not using gcc (3267703)
+ o Fix a type error in our (unused) arc4random_stir() (f736198)
+ o Correctly detect and stop non-chunked http requests when the body is too long (63a715e)
+ o Have event_base_gettimeofday_cached() always return wall-clock time (a459ef7)
+ o Workaround for http crash bug 3078187 (5dc5662 Tomash Brechko)
+ o Fix incorrect assertions and possible use-after-free in evrpc_free() (4b8f02f Christophe Fillot)
+ o Reset outgoing http connection when read data in idle state. (272823f Tomash Brechko)
+ o Fix subtle recursion in evhttp_connection_cb_cleanup(). (218cf19 Tomash Brechko)
+ o Fix the case when failed evhttp_make_request() leaved request in the queue. (0d6622e Tomash Brechko)
+ o Fix a crash bug in evdns server circular list code (00e91b3)
+ o Handle calloc failure in evdns. (Found by Dave Hart) (364291e)
+ o Fix a memory leak on win32 socket->event map. (b4f89f0)
+ o Add a forgotten NULL check to evhttp_parse_headers (12311ff Sebastian Hahn)
+ o Fix possible NULL-deref in evdns_cancel_request (5208544 Sebastian Hahn)
+
+PORTABILITY:
+ o Fall back to sscanf if we have no other way to implement strtoll (453317b)
+ o Build correctly on platforms without sockaddr_storage (9184563)
+ o Try to build correctly on platforms with no IPv6 support (713c254)
+ o Build on systems without AI_PASSIVE (cb92113)
+ o Fix http unit test on non-windows platforms without getaddrinfo (6092f12)
+ o Do not check for gethostbyname_r versions if we have getaddrinfo (c1260b0)
+ o Include arpa/inet.h as needed on HPUX (10c834c Harlan Stenn)
+ o Include util-internal.h as needed to build on platforms with no sockaddr_storage (bbf5515 Harlan Stenn)
+ o Check for getservbyname even if not on win32. (af08a94 Harlan Stenn)
+ o Add -D_OSF_SOURCE to fix hpux builds (0b33479 Harlan Stenn)
+ o Check for allocation failures in apply_socktype_protocol_hack (637d17a)
+ o Fix the check for multicast or broadcast addresses in evutil_check_interfaces (1a21d7b)
+ o Avoid a free(NULL) if out-of-memory in evdns_getaddrinfo. Found by Dave Hart (3417f68)
+
+DEFENSIVE PROGRAMMING:
+ o Add compile-time check for AF_UNSPEC==PF_UNSPEC (3c8f4e7)
+
+BUGS IN TESTS:
+ o Fix test.sh output on solaris (b4f89b6 Dave Hart)
+ o Make test-eof fail with a timeout if we never get an eof. (05a2c22 Harlan Stenn)
+ o Use %s with printf in test.sh (039b9bd)
+ o Add an assert to appease clang's static analyzer (b0ff7eb Sebastian Hahn)
+ o Add a forgotten return value check in the unit tests (3819b62 Sebastian Hahn)
+ o Actually send NULL request in http_bad_request_test (b693c32 Sebastian Hahn)
+ o add some (void) casts for unused variables (65707d7 Sebastian Hahn)
+ o Refactor test_getaddrinfo_async_cancel_stress() (48c44a6 Sebastian Hahn)
+ o Be nice and "handle" error return values in sample code (4bac793 Sebastian Hahn)
+ o Check return value of evbuffer_add_cb in tests (93a1abb Sebastian Hahn)
+ o Remote some dead code from dns-example.c (744c745 Sebastian Hahn)
+ o Zero a struct sockaddr_in before using it (646f9fe Sebastian Hahn)
+
+BUILD FIXES:
+ o Fix warnings about AC_LANG_PROGRAM usage (f663112 Sebastian Hahn)
+ o Skip check for zlib if we have no zlib.h (a317c06 Harlan Stenn)
+ o Fix autoconf bracket issues; make check for getaddrinfo include netdb.h (833e5e9 Harlan Stenn)
+ o Correct an AM_CFLAGS to an AM_CPPFLAGS in test/Makefile.am (9c469db Dave Hart)
+ o Fix make distcheck & installation of libevent 1 headers (b5a1f9f Dave Hart)
+ o Fix compilation under LLVM/clang with --enable-gcc-warnings (ad9ff58 Sebastian Hahn)
+
+FEATURES:
+ o Make URI parser able to tolerate nonconformant URIs. (95060b5)
+
+DOCUMENTATION:
+ o Clarify event_set_mem_functions doc (926f816)
+ o Correct evhttp_del_accept_socket documentation on whether socket is closed (f665924)
+ o fix spelling mistake in whatsnew-2.0.txt (deb2f73)
+ o Fix sample/http-server ipv6 fixes (eb692be)
+ o Comment internal headers used in sample code. (4eb281c)
+ o Be explicit about how long event loops run in event.h documentation (f95bafb)
+ o Add comment to configure.in to explain gc-sections test logic (c621359)
+ o Fix a couple of memory leaks in samples/http-server.c. Found by Dave Hart. (2e9f665)
+
+
+
+BUILD IMPROVEMENTS:
+ Libevent 2.1.2-alpha modernizes Libevent's use of autotools, and makes
+ numerous other build system improvements. Parallel builds should be faster, and all
+ builds should be quieter.
+
+ o Split long lists in Makefile.am into one-item-per-line (2711cda)
+ o Remove unnecessary code in configure.in. (e65914f Ross Lagerwall)
+ o attempt to support OpenSSL in Makefile.nmake (eba0eb2 Patrick Pelletier)
+ o Use newer syntax for autoconf/automake init (7d60ba8)
+ o Enable silent build rules by default. Override with V=1 (7b18e5c)
+ o Switch to non-recursive makefiles (7092f3b)
+ o Rename subordinate Makefile.ams to include.am (6cdfeeb)
+ o Make quiet build even quieter (371a123)
+ o New --quiet option for event_rpcgen.py (aa59c1e)
+ o Be quiet when making regress.gen.[ch] (607a8ff)
+ o Fix handling of no-python case for nonrecursive make (1e3123d)
+ o We now require automake 1.9 or later. Modernize! (b7f6e89)
+ o Rename configure.in to configure.ac. (b3fea67 Ross Lagerwall)
+ o Use correct openssl libs and includes in pkgconfig file (d70af27)
+ o Use the same CFLAGS for openssl when building unit tests as with
+ libevent (1d9d511)
+
+DOCUMENTATION
+ o Note that make_base_notifiable should not be necessary (26ee5f9)
+ o Be more clear that LEV_OPT_DEFERRED_ACCEPT has tricky prereqs (371efeb)
+ o Add caveat to docs about bufferevent_free() with data in outbuf (6fab9ee)
+ o Make it more clear that NOLOCK means "I promise, no multithreading"
+ (9444524)
+ o Fix a comment in test-fdleak after 077c7e949. (3881d8f Ross Lagerwall)
+ o Make the Makefile.nmake warning slightly less dire (e7bf4c8)
+ o Fix typo : events instead of evets (05f1aca Azat Khuzhin)
+ o Additional comments about OPENSSL_DIR variable, prompted by Dave Hart
+ (6bde2ef Patrick Pelletier)
+
+EVHTTP:
+ o ignore LWS after field-content in headers (370a2c0 Artem Germanov)
+ o Clean up rtrim implementation (aa59d80)
+ o Remove trailing tabs in HTTP headers as well. (ac42519)
+ o Remove internal ws from multiline http headers correctly (c6ff381)
+ o Move evutil_rtrim_lws_ to evutil.c where it belongs (61b93af)
+ o add evhttp_request_get_response_code_line (4f4d0c9 Jay R. Wren)
+ o Use EVUTIL_SOCKET_ERROR() wrapper to save/restore errno in
+ evhttp_connection_fail_ (7afbd60)
+ o preserve errno in evhttp_connection_fail_ for inspection by the
+ callback (36d0ee5 Patrick Pelletier)
+
+BUGFIXES:
+ o Correctly handle running on a system where accept4 doesn't work. (9fbfe9b)
+ o Avoid double-free on error in evbuffer_add_file. Found by
+ coverity. (6a81b1f)
+ o Fix another possible uninitialized read in dns regression tests. Found
+ by coverity. (13525c5)
+ o Add checks for functions in test-ratelim.c; found by Coverity (aa501e1)
+ o Avoid memory leak in test_event_calloc unit test; found by coverity
+ (92817a1)
+ o Fix a shadowed variable in addfile_test_readcb; found by coverity
+ (225344c)
+ o Check return value when using LEV_OPT_DEFERRED_ACCEPT. Found by
+ coverity (6487f63)
+ o Prevent reference leak of bufferevent if getaddrinfo fails. (b757786
+ Joachim Bauch)
+ o Make event_base_getnpriorities work with old "implicit base" code
+ (c46cb9c)
+ o Simplify and correct evutil_open_closeonexec_ (0de587f)
+ o Fix event_dlist definition when sys/queue not included (81b6209
+ Derrick Pallas)
+
+
+
+Changes in version 2.1.1-alpha (4 Apr 2012)
+
+ Libevent 2.1.1-alpha includes a number of new features and performance
+ improvements. The log below tries to organize them by rough area of
+ effect. It omits some commits which were pure bugfixes on other commits
+ listed below. For more detail, see the git changelogs. For more
+ insight, see the "whatsnew-2.1.txt" document included in the Libevent
+ 2.1.1-alpha distribution.
+
+ Performance: Core
+ o Replace several TAILQ users with LIST. LIST can be a little faster than
+ TAILQ for cases where we don't need queue-like behavior. (f9db33d,
+ 6494772, d313c29, 974d004)
+ o Disabled code to optimize the case where we reinsert an existing
+ timeout (e47042f, 09cbc3d)
+ o Remove a needless base-notify when rescheduling the first timeout (77a96fd)
+ o Save a needless comparison when removing/adjusting timeouts (dd5189b)
+ o Possible optimization: split event_queue_insert/remove into
+ separate functions. needs testing (efc4dc5)
+ o Make event_count maintenance branchless at the expense of an
+ extra shift. Needs benchmarking (d1cee3b)
+ o In the 2.1 branch, let's try out lazy gettimeofday/clock_gettime
+ comparison (2a83ecc)
+ o Optimization in event_process_active(): ignore maxcb & endtime
+ for highest priority events. (a9866aa Alexander Drozdov)
+ o Bypass event_add when using event_base_once() for a 0-sec timeout (35c5c95)
+ o Remove the eventqueue list and the ev_next pointers. (604569b 066775e)
+
+ Performance: Evbuffers
+ o Roughly 20% speed increase when line-draining a buffer using
+ EVBUFFER_EOL_CRLF (5dde0f0 Mina Naguib)
+ o Try to squeeze a little more speed out of EVBUFFER_EOL_CRLF (7b9d139)
+ o Fix a bug in the improved EOL_CRLF code (d927965)
+ o Remove a needless branch in evbuffer_drain() (d19a326)
+
+ Performance: Linux
+ o Infrastructure for using faster/fewer syscalls when creating
+ sockets (a1c042b)
+ o Minimize syscalls during socket creation in listener.c (7e9e289)
+ o Use a wrapper function to create the notification
+ pipe/socketpair/eventfd (ca76cd9)
+ o Use pipes for telling signals to main thread when possible (a35f396)
+ o Save syscalls when constructing listener sockets for evhttp (af6c9d8)
+ o Save some syscalls when creating evdns sockets (713e570)
+ o Save some syscalls when constructing a socket for a bufferevent (33fca62)
+ o Prefer epoll_create1 on Linuxen that have it (bac906c)
+
+ Performance: Epoll backend
+ o Use current event set rather than current pending change when
+ deciding whether to no-op a del (04ba27e Mike Smellie)
+ o Replace big chain of if/thens in epoll.c with a table lookup (8c83eb6)
+ o Clean up error handling in epoll_apply_one_change() a little (2d55a19)
+
+ Performance: Evport backend
+ o evport: use evmap_io to track fdinfo status. Should save time and
+ RAM. (4687ce4)
+ o evport: Remove a linear search over recent events when
+ reactivating them (0f77efe)
+ o evport: Use portev_user to remember fdinfo struct (276ec0e)
+ o evport: don't scan more events in ed_pending than needed (849a5cf)
+ o evport: Remove artificial low limit on max events per getn call (c04d927)
+ o Reenable main/many_events_slow_add for evport in 2.1 (e903db3)
+
+ Performance: Windows
+ o Use GetSystemTimeAsFileTime to implement gettimeofday on
+ win32. It's faster and more accurate than our old
+ approach. (b8b8aa5)
+
+ New functions and features: debugging
+ o Add event_enable_debug_logging() to control use of debug logs (e30a82f)
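+
+ A minimal sketch of how event_enable_debug_logging() is meant to be
+ used; turning it on early, before the rest of the setup, and using the
+ EVENT_DBG_ALL mask are one reasonable choice, not the only one:
+
+      #include <event2/event.h>
+
+      int main(void)
+      {
+          /* Ask Libevent to emit its debug-level log messages; pass
+           * EVENT_DBG_NONE later to silence them again. */
+          event_enable_debug_logging(EVENT_DBG_ALL);
+
+          struct event_base *base = event_base_new();
+          /* ... create events and run event_base_dispatch(base) ... */
+          event_base_free(base);
+          return 0;
+      }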
+
+ New functions and features: core
+ o Add event_config function to limit time/callbacks between calls
+ to dispatch (fd4de1e, 9fa56bd, a37a0c0, 3c63edd)
+ o New EVLOOP_NO_EXIT_ON_EMPTY option to keep looping even when no
+ events are pending (084e68f)
+ o Add event_base_get_npriorities() function. (ee3a4ee Alexander Drozdov)
+ o Make evbase_priority_init() and evbase_get_npriorities()
+ threadsafe (3c55b5e)
+ o New event_base_update_cache_time() to set cached_tv to current
+ time (212533e Abel Mathew)
+ o Add event_self_cbarg() to be used in conjunction with
+ event_new(). (ed36e6a Ross Lagerwall, fa931bb, 09a1906, 1338e6c,
+ 33e43ef)
+ o Add a new libevent_global_shutdown() to free all globals before
+ exiting. (041ca00 Mark Ellzey, f98c158, 15296d0, 55e991b)
+ o Use getifaddrs to detect our interfaces if possible (7085a45)
+ o Add event_base_get_running_event() to get the event* whose cb we
+ are in (c5732fd, 13dad99)
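+
+ As an illustration of the event_self_cbarg() entry above, a minimal
+ sketch of a timer event that re-adds itself from its own callback; the
+ function names and the one-second interval are illustrative:
+
+      #include <event2/event.h>
+
+      /* event_self_cbarg() makes the event itself arrive as "arg". */
+      static void tick_cb(evutil_socket_t fd, short what, void *arg)
+      {
+          struct event *self = arg;
+          struct timeval one_sec = { 1, 0 };
+          (void)fd; (void)what;
+          event_add(self, &one_sec);  /* re-arm the one-shot timer */
+      }
+
+      static struct event *setup_tick(struct event_base *base)
+      {
+          struct timeval one_sec = { 1, 0 };
+          struct event *ev = event_new(base, -1, 0, tick_cb,
+              event_self_cbarg());
+          event_add(ev, &one_sec);
+          return ev;
+      }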
+
+ New functions and features: building
+ o Implement --enable-gcc-hardening configure option (7550267 Sebastian Hahn)
+
+ New functions and features: evbuffers
+ o Add evbuffer_add_file_segment() so one fd can be used efficiently
+ in more than one evbuffer_add_file at a time (e72afae, c2d9884,
+ 3f405d2, 0aad014)
+ o Fix windows file segment mappings (8254de7)
+ o Allow evbuffer_ptr_set to yield a point just after the end of the
+ buffer. (e6fe1da)
+ o Allow evbuffer_ptr to point to position 0 in an empty evbuffer
+ (7aeb2fd Nir Soffer)
+ o Set the special "not found" evbuffer_ptr consistently. (e3e97ae Nir Soffer)
+ o support adding buffers to other buffers non-destructively
+ (9d7368a Joachim Bauch)
+ o prevent nested multicast references, reworked locking (26041a8
+ Joachim Bauch)
+ o New EVBUFFER_EOL_NUL to read NUL-terminated strings from an
+ evbuffer (d7a8b36 Andrea Montefusco, 54142c9)
+ o Make evbuffer_file_segment_types adaptable (c6bbbf1)
+ o Added evbuffer_add_iovec and unit tests. (aaec5ac Mark Ellzey, 27b5398)
+ o Add evbuffer_copyout_from to copy data from the middle of a
+ buffer (27e2225)
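+
+ For the EVBUFFER_EOL_NUL entry above, a minimal sketch of draining
+ NUL-terminated records from an evbuffer; error handling is kept to a
+ minimum and the function name is illustrative:
+
+      #include <stdio.h>
+      #include <stdlib.h>
+      #include <event2/buffer.h>
+
+      /* Pull every complete NUL-terminated string out of "buf". */
+      static void drain_nul_records(struct evbuffer *buf)
+      {
+          char *record;
+          size_t len;
+          while ((record = evbuffer_readln(buf, &len,
+                      EVBUFFER_EOL_NUL)) != NULL) {
+              printf("record (%zu bytes): %s\n", len, record);
+              free(record);  /* evbuffer_readln returns a heap copy */
+          }
+      }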
+
+ New functions and features: bufferevents
+ o Allow users to set allow_dirty_shutdown (099d27d Catalin Patulea)
+ o Tweak allow_dirty_shutdown documentation (a44cd2b)
+ o Fix two issues in the allow_dirty_shutdown code. (f3b89de)
+ o Add a bufferevent_getcb() to find a bufferevent's current
+ callbacks (a650394)
+ o bufferevent: Add functions to set/get max_single_read/write
+ values. (998c813 Alexander Drozdov)
+ o bev_ssl: Be more specific in event callbacks. evhttp in particular gets
+ confused without at least one of BEV_EVENT_{READING|WRITING}. (f7eb69a
+ Catalin Patulea)
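+
+ A minimal sketch of the bufferevent_getcb() accessor listed above, here
+ used to save the current callbacks before temporarily replacing the
+ read callback; the helper name is illustrative:
+
+      #include <event2/bufferevent.h>
+
+      static void swap_read_callback(struct bufferevent *bev,
+          bufferevent_data_cb new_readcb, void *new_arg)
+      {
+          bufferevent_data_cb readcb, writecb;
+          bufferevent_event_cb eventcb;
+          void *cbarg;
+
+          /* Remember what is installed now... */
+          bufferevent_getcb(bev, &readcb, &writecb, &eventcb, &cbarg);
+          /* ...then swap in the new read callback, keeping the rest. */
+          bufferevent_setcb(bev, new_readcb, writecb, eventcb, new_arg);
+      }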
+
+ New functions and features: evconnlisteners
+ o Support TCP_DEFER_ACCEPT sockopts for listeners (5880e4a Mark Ellzey,
+ a270728)
+ o Add another caveat to the TCP_DEFER_ACCEPT documentation (a270728)
+ o Allow evconnlistener to be created in disabled state. (9593a33
+ Alexander Drozdov)
+ o The LEV_OPT_CLOSE_ON_EXEC flag now applies to accepted listener
+ sockets too (4970329)
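+
+ To illustrate the listener options above (LEV_OPT_DEFERRED_ACCEPT for
+ TCP_DEFER_ACCEPT and LEV_OPT_CLOSE_ON_EXEC), a minimal sketch of
+ creating a listener with them; the port number and callback are
+ placeholders:
+
+      #include <string.h>
+      #include <netinet/in.h>
+      #include <arpa/inet.h>
+      #include <event2/listener.h>
+
+      static struct evconnlistener *make_listener(struct event_base *base,
+          evconnlistener_cb accept_cb, void *arg)
+      {
+          struct sockaddr_in sin;
+          memset(&sin, 0, sizeof(sin));
+          sin.sin_family = AF_INET;
+          sin.sin_port = htons(8080);  /* placeholder port */
+
+          return evconnlistener_new_bind(base, accept_cb, arg,
+              LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE |
+              LEV_OPT_CLOSE_ON_EXEC | LEV_OPT_DEFERRED_ACCEPT,
+              -1, (struct sockaddr *)&sin, sizeof(sin));
+      }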
+
+ Evhttp:
+ o Add new evhttp_{connection_}set_timeout_tv() functions to set
+ finer-grained http timeouts (6350e6c Constantine Verutin)
+ o Performance tweak to evhttp_parse_request_line. (aee1a97 Mark Ellzey)
+ o Add missing break to evhttp_parse_request_line (0fcc536)
+ o Add evhttp callback for bufferevent creation; this lets evhttp
+ support SSL. (8d3a850)
+ o Remove calls to deprecated bufferevent functions from evhttp.c (4d63758)
+ o evhttp: Add evhttp_foreach_bound_socket. (a2c48e3 Samy Al Bahra)
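+
+ The finer-grained timeout entry above amounts to passing a struct
+ timeval instead of whole seconds; a minimal sketch, with an arbitrary
+ 250 ms value:
+
+      #include <event2/util.h>
+      #include <event2/http.h>
+
+      static void set_http_timeouts(struct evhttp *http,
+          struct evhttp_connection *conn)
+      {
+          struct timeval quarter_second = { 0, 250000 };
+          evhttp_set_timeout_tv(http, &quarter_second);
+          evhttp_connection_set_timeout_tv(conn, &quarter_second);
+      }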
+
+ Build improvements:
+ o Add AC_USE_SYSTEM_EXTENSIONS to configure.in. Requires follow on
+ patches for correctness and robustness. (1fa7dbe Kevin Bowling)
+ o Filter '# define' statements from autoconf and generate
+ event-private.h (321b558 Kevin Bowling)
+ o Remove internal usage of _GNU_SOURCE (3b26541 Kevin Bowling)
+ o Eliminate a couple more manual internal _GNU_SOURCE defines (c51ef93
+ Kevin Bowling)
+ o Add AC_GNU_SOURCE to the fallback case. (ea8fa4c Kevin Bowling)
+ o Use a Configuration Header Template for evconfig-private.h (868f888
+ Kevin Bowling)
+ o Fix a comment warning and add evconfig-private.h to .gitignore
+ (f6d66bc Kevin Bowling)
+ o Include evconfig-private.h in internal files for great good. (0915ca0
+ Kevin Bowling)
+ o Backport libevent to vanilla Autoconf 2.59 (as used in RHEL5)
+ (ad03952 Kevin Bowling)
+ o Prefer the ./configure evconfig-private.h in MinGW, just in
+ case. (f964b72 Kevin Bowling)
+ o Shell hack for weird mkdir -p commands (fd7b5a8 Kevin Bowling)
+ o Add evconfig-private to remaining files (ded0a09 Kevin Bowling)
+ o Allow use of --enable-silent-rules for quieter compilation with
+ automake 1.11 (f1f8514 Dave Hart)
+ o Use "_WIN32", not WIN32: it's standard and we don't need to fake it
+ (9f560b)
+ o In configure, test for _WIN32 not WIN32. (85078b1 Peter Rosin)
+ o Do not define WIN32 in Makefile.nmake (d41f3ea Peter Rosin)
+ o Provide the autoconf m4 macros for the new OpenSSL via pkg-config
+ stuff. (674dc3d Harlan Stenn)
+ o Use pkg-config (if available) to handle OpenSSL. (1c63860 Harlan Stenn)
+ o We need AM_CPPFLAGS when compiling bufferevent_openssl.c (6d2613b
+ Harlan Stenn)
+ o Fix OSX build: $(OPENSSL_INCS) needs to be after
+ $(AM_CPPFLAGS). (46f1769 Zack Weinberg)
+ o Make gcc warnings on by default, and --enable-gcc-warnings only add
+ -Werror (d46517e Sebastian Hahn)
+ o Split up extra-long AC_CHECK_FUNCS/HEADERS lines in configure.in (88a30ad)
+ o Move libevent 1.x headers to include/, to put all public headers in
+ one place. (bbea8d6)
+ o Put #ifdef around some files to support alternate build
+ systems. (76d4c92 Ross Lagerwall)
+ o Also make win32select.c conditional for IDE users (bf2c5a7)
+
+ Debugging:
+ o Add a magic number to debug_locks to better catch lock-coding
+ errors. (b4a29c0 Dave Hart)
+ o munge the debug_lock signature before freeing it: it might help us
+ catch use-after-free (f28084d)
+ o Added --enable-event-debugging in configure (bc7b4e4, a9c2c9a Mark Ellzey)
+ o Debug addition for printing usec on TIMEOUT debugging. (ac43ce0 Mark Ellzey)
+ o Added usec debug in another area for debug (3baab0d Mark Ellzey)
+ o added timeout debug logs to include event ptr. (4b7d298 Mark Ellzey)
+ o more event dbg updates (6727543 Mark Ellzey)
+ o Clarify event_enable_debug_logging a little (6207826)
+ o Make --enable-verbose-debug option match its help text (10c3450)
+ o Add argument checks to some memory functions in `event.c'. (c8953d1
+ Mansour Moufid)
+
+ Testing:
+ o More abstraction in test.sh (cd74c4e)
+ o Add failing test for evbuffer_search_range. (8e26154 Nir Soffer)
+ o Tweaks to return types with end-of-buf ptrs (9ab8ab8)
+ o Add an (internal) usleep function for use by unit tests (f25d9d3)
+ o Synchronize with upstream tinytest (6c81be7)
+ o Make test-changelist faster (7622d26)
+ o Reduce the timeout in the main/fork test. (ab14f7c)
+ o New evhttp function to adjust initial retry timeout (350a3c4)
+ o Make regression tests run over 3x faster. (67a1763)
+ o Use test_timeval_diff_eq more consistently (b77b43f)
+ o Allow more slop in deferred_cb_skew test; freebsd needs it (b9f7e5f)
+ o When including an -internal.h header outside the main tree, do so
+ early (95e2455)
+ o Add a new test: test-fdleak which tests for fd leaks by creating many
+ sockets. (2ef9278 Ross Lagerwall, f7af194, 1c4288f, etc)
+ o Add a unit test for event_base_dump_events() (7afe48a, 8d08cce)
+ o Test more bufferevent_ratelim features (c24f91a)
+
+ Documentation:
+ o Improve evbuffer_ptr documentation (261ba63)
+ o added comments to describe refcounting of multicast chains (ba24f61
+ Joachim Bauch)
+ o Add doxygen for event_base_dump_events (cad5753)
+
+ OSX:
+ o Use "unlimited select" on OSX so that we can have more than
+ FD_SETSIZE fds (1fb5cc6)
+
+ KQueue:
+ o Use SIG_IGN instead of a do-nothing handler for signal events with
+ kqueue (148458e Zack Weinberg)
+
+evrpc:
+ o event_rpcgen.py now prints status information to stdout and errors to
+ stderr. (ffb0ba0 Ross Lagerwall)
+
+ Code improvement and refactoring:
+ o Make event_reinit() more robust and maintainable (272033e)
+ o Restore fast-path event_reinit() for slower backends (2c4b5de)
+ o Check changelist as part of checking representational integrity (39b3f38)
+ o Fix a compile warning in event_reinit (e4a56ed Sebastian Hahn)
+ o Refactor the functions that run over every event. (c89b4e6)
+ o Remove the last vestiges of _EVENT_USE_EVENTLIST (a3cec90)
+ o Make event-config.h depend on Makefile.am (2958a5c)
+
+ Build fixes:
+ o Don't do clang version detection when disabling some flags (083296b
+ Sebastian Hahn)
+
+ C standards conformance:
+ o Check for NULL return on win32 mm_calloc, and set ENOMEM. (af7ba69)
+ o Convert event-config.h macros to avoid reserved identifiers (68120d9)
+ o Generate event-config.h using the correct macros. (f82c57e)
+ o Convert include-guard macro convention to avoid reserved identifiers
+ (3f8c7cd)
+ o Make event_rpcgen.py output conform to identifier conventions (372bff1)
+ o Stop referring to an obsolete include guard in bench_http.h (5c0f7e0)
+ o Make the generated event-config.h use correct include guards (639383a)
+ o Fix all identifiers with names beginning with underscore. (cb9da0b)
+ o Make event_rpcgen.py output conform to identifier conventions, more
+ (bcefd24)
+ o Fix some problems introduced by automated identifier cleanup script
+ (c963534)
+ o Have all visible internal function names end with an underscore. (8ac3c4c)
+ o Apply the naming convention to our EVUTIL_IS* functions (c7848fa)
+ o Clean up lingering _identifiers. (946b584)
+ o Fix doxygen to use new macro conventions (da455e9)
+
+ Bugfixes:
+ o Do not use system EAI/AI values if we are not using the system
+ getaddrinfo. (7bcac07)
+
+ Sample Code:
+ o Fix up sample/event-test.c to use newer interfaces and make it
+ actually work. (19bab4f Ross Lagerwall)
+ o On Unix, remove event.fifo left by sample/event-test.c. (c0dacd2 Ross
+ Lagerwall)
+ o Rename event-test.c to event-read-fifo.c. (a5b370a Ross Lagerwall)
+ o event-read-fifo: Use EV_PERSIST appropriately (24dab0b)
+
+
+
+
diff --git a/libs/libevent/docs/ChangeLog-1.4 b/libs/libevent/docs/ChangeLog-1.4
new file mode 100644
index 0000000000..166d30872f
--- /dev/null
+++ b/libs/libevent/docs/ChangeLog-1.4
@@ -0,0 +1,231 @@
+Changes in 1.4.14b-stable
+ o Set the VERSION_INFO correctly for 1.4.14
+
+Changes in 1.4.14-stable
+ o Add a .gitignore file for the 1.4 branch. (d014edb)
+ o Backport evbuffer_readln(). (b04cc60 Nicholas Marriott)
+ o Make the evbuffer_readln backport follow the current API (c545485)
+ o Valgrind fix: Clear struct kevent before checking for OSX bug. (5713d5d William Ahern)
+ o Fix a crash when reading badly formatted resolv.conf (5b10d00 Yasuoka Masahiko)
+ o Fix memory-leak of signal handler array with kqueue. [backport] (01f3775)
+ o Update sample/signal-test.c to use newer APIs and not leak. (891765c Evan Jones)
+ o Correct all versions in 1.4 branch (ac0d213)
+ o Make evutil_make_socket_nonblocking() leave any other flags alone. (81c26ba Jardel Weyrich)
+ o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (5f2e250 Jardel Weyrich)
+ o Correct a debug message in evhttp_parse_request_line (35df59e)
+ o Merge branch 'readln-backport' into patches-1.4 (8771d5b)
+ o Do not send an HTTP error when we've already closed or responded. (4fd2dd9 Pavel Plesov)
+ o Re-add event_siglcb; some old code _was_ still using it. :( (bd03d06)
+ o Make Libevent 1.4 build on win32 with Unicode enabled. (bce58d6 Brodie Thiesfield)
+ o Distribute nmake makefile for 1.4 (20d706d)
+ o do not fail while sending on http connections the client closed. (5c8b446)
+ o make evhttp_send() safe against terminated connections, too (01ea0c5)
+ o Fix a free(NULL) in min_heap.h (2458934)
+ o Fix memory leak when setting up priorities; reported by Alexander Drozdov (cb1a722)
+ o Clean up properly when adding a signal handler fails. (ae6ece0 Gilad Benjamini)
+ o Do not abort HTTP requests missing a reason string. (29d7b32 Pierre Phaneuf)
+ o Fix compile warning in http.c (906d573)
+ o Define _REENTRANT as needed on Solaris, elsewhere (6cbea13)
+
+
+Changes in 1.4.13-stable:
+ o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov.
+ o Do not allocate the maximum event queue and fd array for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 512K per epoll-based event_base. Resolves bug 2839240.
+ o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h
+ o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec
+ o Rename our replacement compat/sys/_time.h header to avoid a build conflict on HPUX; reported by Kathryn Hogg.
+ o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian.
+ o Fix a problem with excessive memory allocation when using multiple event priorities.
+ o When running set[ug]id, don't check the environment. Based on a patch from OpenBSD.
+
+
+Changes in 1.4.12-stable:
+ o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
+ o Fix an obscure timing-dependent, allocator-dependent crash in the evdns code.
+ o Use __VA_ARGS__ syntax for varargs macros in event_rpcgen when compiler is not GCC.
+ o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
+ o Fix another pair of fencepost bugs in epoll.c. [Patch from Adam Langley.]
+ o Do not break evdns connections to nameservers when our IP changes.
+ o Set truncated flag correctly in evdns server replies.
+ o Disable strict aliasing with GCC: our code is not compliant with it.
+
+Changes in 1.4.11-stable:
+ o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
+ o Remove the limit on size of HTTP headers by removing static buffers.
+ o Fix a nasty dangling pointer bug in epoll.c that could occur after epoll_recalc(). [Patch from Kevin Springborn]
+ o Distribute Win32-Code/event-config.h, not ./event-config.h
+
+Changes in 1.4.10-stable:
+ o clean up buffered http connection data on reset; reported by Brian O'Kelley
+ o bug fix and potential race condition in signal handling; from Alexander Drozdov
+ o rename the Solaris event ports backend to evport
+ o support compilation on Haiku
+ o fix signal processing when a signal callback delivers a signal; from Alexander Drozdov
+ o const-ify some arguments to evdns functions.
+ o off-by-one error in epoll_recalc; reported by Victor Goya
+ o include Doxyfile in tar ball; from Jeff Garzik
+ o correctly parse queries with encoded \r, \n or + characters
+
+Changes in 1.4.9-stable:
+ o event_add would not return error for some backends; from Dean McNamee
+ o Clear the timer cache on entering the event loop; reported by Victor Chang
+ o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
+ o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
+ o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
+ o Fix a typo in setting the global event base; reported by lance.
+ o Fix a memory leak when reading multi-line headers
+ o Fix a memory leak by not running explicit close detection for server connections
+
+Changes in 1.4.8-stable:
+ o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
+ o Fix a merge problem in which name_from_addr returned pointers to the stack; found by Jiang Hong.
+ o Do not remove Accept-Encoding header
+
+Changes in 1.4.7-stable:
+ o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
+
+Changes in 1.4.6-stable:
+ o evutil.h now includes <stdarg.h> directly
+ o switch all uses of [v]snprintf over to evutil
+ o Correct handling of trailing headers in chunked replies; from Scott Lamb.
+ o Support multi-line HTTP headers; based on a patch from Moshe Litvin
+ o Reject negative Content-Length headers; anonymous bug report
+ o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
+ o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
+ o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
+ o Deal with evbuffer_read() returning -1 on EINTR|EAGAIN; from Adam Langley.
+ o Fix a bug in which the DNS server would incorrectly set the type of a cname reply to a.
+ o Fix a bug where setting the timeout on a bufferevent would not take effect if the event was already pending.
+ o Fix a memory leak when using signals for some event bases; reported by Alexander Drozdov.
+ o Add libevent.vcproj file to distribution to help with Windows build.
+ o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
+ o Fix off-by-one errors in devpoll; from Ian Bell
+ o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Do not warn on accept when errno is either EAGAIN or EINTR
+
+Changes in 1.4.5-stable:
+ o Fix connection keep-alive behavior for HTTP/1.0
+ o Fix use of freed memory in event_reinit; pointed out by Peter Postma
+ o Constify struct timeval * where possible; pointed out by Forest Wilkinson
+ o allow min_heap_erase to be called on removed members; from liusifan.
+ o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
+ o Do not use SO_REUSEADDR when connecting
+ o Fix Windows build
+ o Fix a bug in event_rpcgen when generating fixed-sized entries
+
+Changes in 1.4.4-stable:
+ o Correct the documentation on buffer printf functions.
+ o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
+ o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
+ o Fix a potential stack corruption bug in tagging on 64-bit CPUs.
+ o expose bufferevent_setwatermark via header files and fix high watermark on read
+ o fix a bug in bufferevent read water marks and add a test for them
+ o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
+ o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
+ o reduce system calls for getting current time by caching it.
+ o fix evhttp_bind_socket() so that multiple sockets can be bound by the same http server.
+ o Build test directory correctly with CPPFLAGS set.
+ o Fix build under Visual C++ 2005.
+ o Expose evhttp_accept_socket() API.
+ o Merge windows gettimeofday() replacement into a new evutil_gettimeofday() function.
+ o Fix autoconf script behavior on IRIX.
+ o Make sure winsock2.h include always comes before windows.h include.
+
+Changes in 1.4.3-stable:
+ o include Content-Length in reply for HTTP/1.0 requests with keep-alive
+ o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
+ o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
+ o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
+ o make event methods static so that they are not exported; from Andrei Nigmatulin
+ o make RPC replies use application/octet-stream as mime type
+ o do not delete uninitialized timeout event in evdns
+
+Changes in 1.4.2-rc:
+ o remove pending timeouts on event_base_free()
+ o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
+ o devpoll and evport need reinit; tested by W.C.A Wijngaards
+ o event_base_get_method; from Springande Ulv
+ o Send CRLF after each chunk in HTTP output, for compliance with RFC2616. Patch from "propanbutan". Fixes bug 1894184.
+ o Add an int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
+ o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
+ o Allow regression code to build even without Python installed
+ o remove NDEBUG ifdefs from evdns.c
+ o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
+ o detect integer types properly on platforms without stdint.h
+ o Remove "AM_MAINTAINER_MODE" declaration in configure.in: now makefiles and configure should get re-generated automatically when Makefile.am or configure.in chanes.
+ o do not insert event into list when evsel->add fails
+
+Changes in 1.4.1-beta:
+ o free minheap on event_base_free(); from Christopher Layne
+ o debug cleanups in signal.c; from Christopher Layne
+ o provide event_base_new() that does not set the current_base global
+ o bufferevent_write now uses a const source argument; report from Charles Kerr
+ o better documentation for event_base_loopexit; from Scott Lamb.
+ o Make kqueue have the same behavior as other backends when a signal is caught between event_add() and event_loop(). Previously, it would catch and ignore such signals.
+ o Make kqueue restore signal handlers correctly when event_del() is called.
+ o provide event_reinit() to reinitialize an event_base after fork
+ o small improvements to evhttp documentation
+ o always generate Date and Content-Length headers for HTTP/1.1 replies
+ o set the correct event base for HTTP close events
+ o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
+ o Removed obsoleted recalc code
+ o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
+ o fix a bug with event_rpcgen for integers
+ o move EV_PERSIST handling out of the event backends
+ o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
+ o prefix {encode,decode}_tag functions with evtag to avoid collisions
+ o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
+ o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
+ o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
+ o removed linger from http server socket; reported by Ilya Martynov
+ o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
+ o demote most http warnings to debug messages
+ o Fix Solaris compilation; from Magne Mahre
+ o Add a "Date" header to HTTP responses, as required by HTTP 1.1.
+ o Support specifying the local address of an evhttp_connection using set_local_address
+ o Fix a memory leak in which failed HTTP connections would not free the request object
+ o Make adding of array members in event_rpcgen more efficient, by doubling memory allocation
+ o Fix a memory leak in the DNS server
+ o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled
+ o Fix buffer size and string generation in evdns_resolve_reverse_ipv6().
+ o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them.
+ o In DNS responses, the CD flag should be preserved, not the TC flag.
+ o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne
+ o Handle NULL timeouts correctly on Solaris; from Trond Norbye
+ o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye
+ o Add Doxygen documentation to header files; from Mark Heily
+ o Add a evdns_set_transaction_id_fn() function to override the default
+ transaction ID generation code.
+ o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too.
+ o Fix signals implementation on windows.
+ o Fix http module on windows to close sockets properly.
+ o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Hagne Mahre and then Hannah Schroeter.)
+ o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye)
+ o Make test subdirectory compile under mingw.
+ o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile).
+ o Make the test.sh script run unit tests for the evpoll method.
+ o Make the entire evdns.h header enclosed in "extern C" as appropriate.
+ o Fix implementation of strsep on platforms that lack it
+ o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov.
+ o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
+ o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
+ o use a min heap instead of a red-black tree for timeouts; as a result finding the min is a O(1) operation now; from Maxim Yegorushkin
+ o associate an event base with an rpc pool
+ o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent. libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support
+ o Begin using libtool's library versioning support correctly. If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility.
+ o Fix evhttp.h compilation when TAILQ_ENTRY is not defined.
+ o Small code cleanups in epoll_dispatch().
+ o Increase the maximum number of addresses read from a packet in evdns to 32.
+ o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much. Let us know if there's a good reason to put it back in.
+ o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++. Use a macro so that old code won't break. Mark the macro as deprecated.
+ o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests.
+ o Make event_base_free() succeed even if there are pending non-internal events on a base. This may still leak memory and fds, but at least it no longer crashes.
+ o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files.
+ o Remove the long-deprecated acconfig.h file.
+ o Do not require #include <sys/types.h> before #include <event.h>.
+ o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions.
+ o Fix many build issues when using the Microsoft C compiler.
+ o Remove a bash-ism in autogen.sh
+ o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne.
+ o Make the logic for active events work better with internal events; patch from Christopher Layne.
+ o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.
diff --git a/libs/libevent/docs/ChangeLog-2.0 b/libs/libevent/docs/ChangeLog-2.0
new file mode 100644
index 0000000000..a925d33b18
--- /dev/null
+++ b/libs/libevent/docs/ChangeLog-2.0
@@ -0,0 +1,1280 @@
+Changes in version 2.0.21-stable (18 Nov 2012)
+BUGFIXES:
+ o ssl: Don't discard SSL read event when timeout and read come close together (576b29f)
+ o ssl: Stop looping in "consider_reading" if reading is suspended. (f719b8a Joachim Bauch)
+ o ssl: No need to reserve space if reading is suspended. (1acf2eb Joachim Bauch)
+ o dns: Avoid a memory-leak on OOM in evdns. (73e85dd, f2bff75 George Danchev)
+ o build: Use python2 rather than python (0eb0109 Ross Lagerwall)
+ o build: Compile without warnings on mingw64 (94866c2)
+ o build: Fix compilation on mingw64 with -DUSE_DEBUG (62bd2c4)
+ o build: Make rpcgen_wrapper.sh work on systems without a "python2" binary (f3009e4)
+ o iocp: Close IOCP listener socket on free when LEV_OPT_CLOSE_ON_FREE is set (cb853ea Juan Pablo Fernandez)
+ o core: Avoid crash when event_pending() called with no event_base set on event (e3cccf3)
+ o misc: remove stray 'x' so print_err will compile when uncommented (ac35650 Patrick Pelletier)
+ o tests: Fix renegotiation test to work around openssl 1.0.1 bug (c2f3086)
+ o tests: Warn when openssl version in unit test mismatches compiled version. (ac009f9)
+
+
+Changes in version 2.0.20-stable (23 Aug 2012)
+BUGFIXES:
+ o core: Make event_pending() threadsafe. (be7a95c Simon Liu)
+ o win32: avoid crash when waiting forever on zero fds. (160e58b)
+ o evhttp: Fix a memory leak on error in evhttp_uriencode (11c8b31)
+ o evbuffer: Avoid possible needless call to writev. Found by coverity. (6a4ec5c)
+ o evdns: memset sockaddr_in before using it. Found by coverity. (a1a0e67)
+ o evhttp: Check more setsockopt return values when binding sockets. Found by coverity (a0912e3)
+ o evdns: Avoid segfault on weird timeout during name lookup. (dc32077 Greg Hazel)
+ o bufferevent_ssl: Correctly invoke callbacks when a SSL bufferevent reads some and then blocks. (606ac43)
+
+
+PORTABILITY FIXES:
+ o check for arc4random_buf at runtime, on OS X (bff5f94 Greg Hazel)
+ o Correctly check for arc4random_buf (fcec3e8 Sebastian Hahn)
+ o Add explicit AC_PROG_SED to configure.in so all autoconfs will expose $(SED) (ca80ea6)
+
+BUILD FIXES:
+ o Add GCC annotations so that the vsprintf functions get checked properly (117e327)
+ o Fix an unused variable warning on *BSD. (c0720c1)
+
+UNIT TEST FIXES:
+ o Fix a couple of memory leaks (found with Valgrind). (3b2529a Ross Lagerwall)
+ o Remove deadcode in http regression tests. Found by coverity. (5553346)
+ o Fix possible uninitialized read in dns regression tests. Found by coverity. (2259777)
+ o Set umask before calling mkstemp in unit tests. Found by coverity (f1ce15d)
+ o Fix various check-after-dereference issues in unit tests: found by coverity (4f3732d)
+ o Fix resource leaks in the unit tests; found by coverity (270f279)
+ o Add some missing null checks to unit tests; found by coverity (f021c3d)
+ o Avoid more crashes/bad calls in unit tests; found by coverity (3cde5bf)
+ o Remove unused variable; spotted by coverity (6355b2a)
+ o Add checks to various return values in unit tests. Found by coverity (b9e7329)
+ o Move assignment outside tt_assert in ssl unit tests. Appeases coverity. (a2006c0)
+
+
+
+Changes in version 2.0.19-stable (3 May 2012)
+BUGFIXES (CORE):
+ o Refactor event_persist_closure: raise and extract some common logic (bec22b4)
+ o If time has jumped so we'd reschedule a periodic event in the past, schedule it for the future instead (dfd808c)
+ o If a higher-priority event becomes active, don't continue running events of the current priority. (2bfda40)
+
+BUGFIXES (SSL):
+ o Fixed potential double-readcb execution with openssl bufferevents. (4e62cd1 Mark Ellzey)
+
+BUGFIXES (DNS):
+ o Cancel a probe request when the server is freed, and ignore cancelled probe callbacks (94d2336 Greg Hazel)
+ o Remove redundant DNS_ERR_CANCEL check, move comment (46b8060 Greg Hazel)
+ o When retransmitting a timed-out DNS request, pick a fresh nameserver. (3d9e52a)
+
+DOCUMENTATION FIXES:
+ o Fix a typo in the bufferevent documentation (98e9119)
+ o Add missing ) to changelog; spotted by rransom (4c7ee6b)
+ o Fix the website URL in the readme (f775521)
+
+COMPILATION FIXES:
+ o Fix a compilation error with MSVC 2005 due to use of mode_t (336dcae)
+ o Configure with gcc older than 2.95 (4a6fd43 Sebastian Hahn)
+ o Generate event-config.h with a single sed script (30b6f88 Zack Weinberg)
+
+FORWARD-COMPATIBILITY:
+ o Backport: provide EVENT_LOG_* names, and deprecate _EVENT_LOG_* (d1a03b2)
+
+TESTING/DEBUGGING SUPPORT:
+ o dns-example.c can now take a resolv.conf file on the commandline (6610fa5)
+ o Make some evdns.c debug logs more verbose (d873d67)
+ o Work-around a stupid gcov-breaking bug in OSX 10.6 (b3887cd)
+
+
+
+Changes in version 2.0.18-stable (22 Mar 2012)
+BUGFIXES (core):
+ o Make uses of open() close-on-exec safe by introducing an internal evutil_open_closeonexec. (d2b5f72 Ross Lagerwall, 03dce42)
+
+BUGFIXES (kqueue):
+ o Properly zero the kevent in kq_setup_kevent() (c2c7b39 Sebastian Hahn)
+
+BUILD FIXES:
+ o Added OPENSSL_LDFLAGS env variable which is appended to SSL checks. (9278196 Mark Ellzey)
+ o Changed OPENSSL_LDFLAGS to OPENSSL_LIBADD (2d67b63 Mark Ellzey)
+ o Don't do clang version detection when disabling some flags (083296b Sebastian Hahn)
+
+BUGFIXES (dns):
+ o Stop crashing in evdns when nameserver probes give a weird error (bec5068)
+
+
+Changes in version 2.0.17-stable (10 Feb 2012)
+
+BUGFIXES (core):
+ o Be absolutely sure to clear pncalls before leaving event_signal_closure (11f36a5)
+ o check for sysctl before we use it (358c745 Mike Frysinger)
+ o Remove bogus casts of socket to int before calling ev_callback (f032516)
+ o Make evconnlistener work around bug in older Linux when getting nmapped (ecfc720)
+ o Fix a list corruption bug when using event_reinit() with signals present (6e41cdc)
+ o Fix a fd leak in event_reinit() (3f18ad1)
+ o Do a memberwise comparison of threading function tables (c94a5f2 Nate R)
+ o Use C-style comments in C source files (for compatibility with compilers such as xlc on AIX). (d84d917 Greg Hewgill)
+ o Avoid crash when freeing event_iocp and using event_set_mem_functions (19715a6)
+ o In the kqueue backend, do not report EBADF as an EV_READ (5d7bfa1 Nicholas Marriott)
+
+BUGFIXES (evbuffer and bufferevents):
+ o Fix behavior of evbuffer_peek(buf,-1,NULL,NULL,0) (c986f23 Zack Weinberg)
+ o Loop on filtering SSL reads until we are blocked or exhausted. (5b4b812)
+
+BUGFIXES (evhttp):
+ o Force strict validation of HTTP version in response. (790f6b3 Catalin Patulea)
+
+BUGFIXES (evdns):
+ o evdns: fix a bug in circular-queue implementation (d6094b1)
+
+BUILD FIXES:
+ o Fix a silly compilation error with the sun compiler (1927776 Colin Watt)
+ o Suppress a gcc warning from ignoring fwrite return in http-sample.c (7206e8c)
+
+DOCUMENTATION FIXES:
+ o Slightly clarify evbuffer_peek documentation (7bbf6ca)
+ o Update copyright notices to 2012 (e49e289)
+
+NEW APIS:
+ o Backport evhttp_connection_get_bufferevent to Libevent 2.0 (da70fa7 Arno Bakker)
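+
+ A minimal sketch of the backported accessor, used here to adjust the
+ read timeout on the connection's underlying bufferevent; the 30-second
+ value is arbitrary:
+
+      #include <event2/util.h>
+      #include <event2/http.h>
+      #include <event2/bufferevent.h>
+
+      static void tune_connection(struct evhttp_connection *conn)
+      {
+          struct bufferevent *bev = evhttp_connection_get_bufferevent(conn);
+          struct timeval read_timeout = { 30, 0 };
+          bufferevent_set_timeouts(bev, &read_timeout, NULL);
+      }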
+
+TESTS AND TEST FIXES:
+ o Fix a race condition in the dns/bufferevent_connect_hostname test. (cba48c7)
+ o Add function to check referential integrity of an event_base (27737d5)
+ o Check event_base correctness at end of each unit test (3312b02)
+ o Workaround in the unit tests for an apparent epoll bug in Linux 3.2 (dab9187)
+ o Better workaround for Linux 3.2 edge-triggered epoll bug (9f9e259)
+
+Changes in version 2.0.16-stable (18 Nov 2011)
+BUGFIXES (core):
+ o More detailed message in case of libevent self-debugging failure. (9e6a4ef Leonid Evdokimov)
+ o epoll: close fd on alloc fail at initialization (1aee718 Jamie Iles)
+ o Fix compile warning from saying event2/*.h inside a comment (447b0ba)
+ o Warn when unable to construct base because of failing make_base_notifiable (4e797f3)
+ o Don't try to make notifiable event_base when no threading fns are configured (e787413)
+
+BUGFIXES (evbuffer):
+ o unit test for remove_buffer bug (90bd620 Greg Hazel)
+ o Fix an evbuffer crash in evbuffer_remove_buffer() (c37069c)
+
+BUGFIXES (bufferevent_openssl):
+ o Refactor amount-to-read calculations in buffervent_ssl consider_reading() (a186e73 Mark Ellzey)
+ o Move SSL rate-limit enforcement into bytes_to_read() (96c562f)
+ o Avoid spinning on OpenSSL reads (2aa036f Mark Ellzey)
+
+BUGFIXES (dns)
+ o Empty DNS reply with OK status is another way to say NODATA. (21a08d6 Leonid Evdokimov)
+
+TESTING:
+ o Tests for 94fba5b and f72e8f6 (d58c15e Leonid Evdokimov)
+ o Test for commit aff6ba1 (f7841bf Leonid Evdokimov)
+ o Style and comment tweaks for dns/leak* tests (5e42202)
+ o improve test to remove at least one buffer from src (7eb52eb Greg Hazel)
+
+DOCUMENTATION:
+ o Add note about evhttp_send_reply_end to its doxygen (724bfb5)
+ o Update copyright dates to 2011. (3c824bd)
+ o Fix typo in whatsnew-2.0.txt (674bc6a Mansour Moufid)
+ o Improve win32 behavior of dns-sample.c code (a3f320e Gisle Vanem)
+
+
+
+Changes in version 2.0.15-stable (12 Oct 2011)
+BUGFIXES (DNS):
+ o DNS: add ttl for negative answers using RFC 2308 idea. (f72e8f6 Leonid Evdokimov)
+ o Add DNS_ERR_NODATA error code to handle empty replies. (94fba5b Leonid Evdokimov)
+
+BUGFIXES (bufferevents and evbuffers):
+ o Make evbuffer callbacks get the right n_added value after evbuffer_add (1ef1f68 Alex)
+ o Prefer mmap to sendfile unless a DRAINS_TO_FD flag is set. Allows add_file to work with SSL. (0ba0af9)
+
+BUGFIXES (event loop):
+ o When a signal callback is activated to run multiple times, allow event_base_loopbreak to work even before they all have run. (4e8eb6a)
+
+DOCUMENTATION FIXES:
+ o Fix docstring in dns.h (2b6eae5 Leonid Evdokimov)
+ o refer to non-deprecated evdns functions in comments (ba5c27d Greg Hazel)
+
+BUILD AND TESTING FIXES:
+ o le-proxy and regress depend on openssl directly (9ae061a Sergey Avseyev)
+ o Use _SOURCES, not _sources, in sample/Makefile.am (7f82382)
+ o Fixed compiler warnings for unchecked read/write calls. (c3b62fd Mark Ellzey)
+ o Make write-checking fixes use tt_fail_perror (2b76847)
+ o Fix some "value never used" warnings with gcc 4.6.1 (39c0cf7)
+
+
+
+Changes in version 2.0.14-stable (31 Aug 2011)
+BUGFIXES (bufferevents and evbuffers):
+ o Propagate errors on the underlying bufferevent to the user. (4a34394 Joachim Bauch)
+ o Ignore OpenSSL deprecation warnings on OS X (5d1b255 Sebastian Hahn)
+ o Fix handling of group rate limits under 64 bytes of burst (6d5440e)
+ o Solaris sendfile: correctly detect amount of data sent (643922e Michael Herf)
+ o Make rate limiting work with common_timeout logic (5b18f13)
+ o clear read watermark on underlying bufferevent when creating filtering bev to fix potentially failing fragmented ssl handshakes (54f7e61 Joachim Bauch)
+
+BUGFIXES (IOCP):
+ o IOCP: don't launch reads or writes on an unconnected socket (495c227)
+ o Make IOCP rate-limiting group support stricter and less surprising. (a98da7b)
+ o Have test-ratelim.c support IOCP (0ff2c5a)
+ o Make overlapped reads result in evbuffer callbacks getting invoked (6acfbdd)
+ o Correctly terminate IO on an async bufferevent on bufferevent_free (e6af35d)
+
+BUGFIXES (other):
+ o Fix evsig_dealloc memory leak with debugging turned on. (9b724b2 Leonid Evdokimov)
+ o Fix request_finished memory leak with debugging turned on. (aff6ba1 Leonid Evdokimov)
+
+BUILD AND TESTING FIXES:
+ o Allow OS-neutral builds for platforms where some versions have arc4random_buf (b442302 Mitchell Livingston)
+ o Try to fix 'make distcheck' errors when building out-of-tree (04656ea Dave Hart)
+ o Clean up some problems identified by Coverity. (7c11e51 Harlan Stenn)
+
+
+Changes in version 2.0.13-stable (18 Jul 2011)
+BUGFIXES
+ o Avoid race-condition when initializing global locks (b683cae)
+ o Fix bug in SSL bufferevents backed by a bev with a write high-watermarks (e050703 Joachim Bauch)
+ o Speed up invoke_callbacks on evbuffers when there are no callbacks (f87f568 Mark Ellzey)
+ o Avoid a segfault when all methods are disabled or broken (27ce38b)
+ o Fix incorrect results from evbuffer_search_eol(EOL_LF) (4461f1a)
+ o Add some missing checks for mm_calloc failures (89d5e09)
+ o Replace an assertion for event_base_free(NULL) with a check-and-warn (09fe97d)
+ o Report kqueue ebadf, epipe, and eperm as EV_READ events (1fd34ab)
+ o Check if the `evhttp_new_object' function in `http.c' returns NULL. (446cc7a Mansour Moufid)
+ o Use the correct printf args when formatting size_t (3203f88)
+ o Complain if the caller tries to change threading cbs after setting them (cb6ecee)
+
+DOCUMENTATION FIXES AND IMPROVEMENTS
+ o Revise the event/evbuffer/bufferevent doxygen for clarity and accuracy (2888fac)
+ o Update Doxyfile to produce more useful output (aea0555)
+
+TEST FIXES
+ o Fix up test_evutil_snprintf (caf695a)
+ o Fix tinytest invocation from windows shell (57def34 Ed Day)
+
+BUILD FIXES
+ o Use AM_CPPFLAGS in sample/Makefile.am, not AM_CFLAGS (4a5c82d)
+ o Fix select.c compilation on systems with no NFDBITS (49d1136)
+ o Fix a few warnings on OpenBSD (8ee9f9c Nicholas Marriott)
+ o Don't break when building tests from git without python installed (b031adf)
+ o Don't install event_rpcgen.py when --disable-libevent-install is used (e23cda3 Harlan Stenn)
+ o Fix AIX build issue with TAILQ_FOREACH definition (e934096)
+
+
+Changes in version 2.0.12-stable (4 Jun 2011)
+BUGFIXES
+ o Fix a warn-and-fail bug in kqueue by providing kevent() room to report errors (28317a0)
+ o Fix an assert-inducing fencepost bug in the select backend (d90149d)
+ o Fix failing http assertion introduced in commit 0d6622e (0848814 Kevin Ko)
+ o Fix a bug that prevented us from configuring IPv6 nameservers. (74760f1)
+ o Prevent size_t overflow in evhttp_htmlescape. (06c51cd Mansour Moufid)
+ o Added several checks for under/overflow conditions in evhttp_handle_chunked_read (a279272 Mark Ellzey)
+ o Added overflow checks in evhttp_read_body and evhttp_get_body (84560fc Mark Ellzey)
+
+DOCUMENTATION:
+ o Add missing words to EVLOOP_NONBLOCK documentation (9556a7d)
+
+BUILD FIXES
+ o libssl depends on libcrypto, not the other way around. (274dd03 Peter Rosin)
+ o Libtool brings in the dependencies of libevent_openssl.la automatically (7b819f2 Peter Rosin)
+ o Use OPENSSL_LIBS in Makefile.am (292092e Sebastian Hahn)
+ o Move the win32 detection in configure.in (ceb03b9 Sebastian Hahn)
+ o Correctly detect openssl on windows (6619385 Sebastian Hahn)
+ o Fix a compile warning with zlib 1.2.4 and 1.2.5 (5786b91 Sebastian Hahn)
+ o Fix compilation with GCC 2, which had no __builtin_expect (09d39a1 Dave Hart)
+ o Fix new warnings from GCC 4.6 (06a714f)
+ o Link with -lshell32 and -ladvapi32 on Win32. (86090ee Peter Rosin)
+ o Make the tests build when OpenSSL is not available. (07c41be Peter Rosin)
+ o Bring in the compile script from automake, if needed. (f3c7a4c Peter Rosin)
+ o MSVC does not provide S_ISDIR, so provide it manually. (70be7d1 Peter Rosin)
+ o unistd.h and sys/time.h might not exist. (fe93022 Peter Rosin)
+ o Make sure TINYTEST_LOCAL is defined when building tinytest.c (8fa030c Peter Rosin)
+ o Fix winsock2.h #include issues with MSVC (3d768dc Peter Rosin)
+ o Use evutil_gettimeofday instead of relying on the system gettimeofday. (0de87fe Peter Rosin)
+ o Always use evutil_snprintf, even if OS provides it (d1b2d11 Sebastian Hahn)
+ o InitializeCriticalSectionAndSpinCount requires _WIN32_WINNT >= 0x0403. (816115a Peter Rosin)
+ o cygwin: make it possible to build DLLs (d54d3fc)
+
+
+
+Changes in version 2.0.11-stable (27 Apr 2011)
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+BUGFIXES:
+ o Fix evport handling of POLLHUP and POLLERR (b42ce4b)
+ o Fix compilation on Windows with NDEBUG (cb8059d)
+ o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (0144886 Trond Norbye)
+ o Detect and handle more allocation failures. (666b096 Jardel Weyrich)
+ o Use event_err() only if the failure is truly unrecoverable. (3f8d22a Jardel Weyrich)
+ o Handle resize failures in the select backend better. (83e805a)
+ o Correctly free selectop fields when select_resize fails in select_init (0c0ec0b)
+ o Make --enable-gcc-warnings a no-op if not using gcc (3267703)
+ o Fix a type error in our (unused) arc4random_stir() (f736198)
+ o Correctly detect and stop non-chunked http requests when the body is too long (63a715e)
+ o Have event_base_gettimeofday_cached() always return wall-clock time (a459ef7)
+ o Workaround for http crash bug 3078187 (5dc5662 Tomash Brechko)
+ o Fix incorrect assertions and possible use-after-free in evrpc_free() (4b8f02f Christophe Fillot)
+ o Reset outgoing http connection when read data in idle state. (272823f Tomash Brechko)
+ o Fix subtle recursion in evhttp_connection_cb_cleanup(). (218cf19 Tomash Brechko)
+ o Fix the case where a failed evhttp_make_request() left the request in the queue. (0d6622e Tomash Brechko)
+ o Fix a crash bug in evdns server circular list code (00e91b3)
+ o Handle calloc failure in evdns. (Found by Dave Hart) (364291e)
+ o Fix a memory leak on win32 socket->event map. (b4f89f0)
+ o Add a forgotten NULL check to evhttp_parse_headers (12311ff Sebastian Hahn)
+ o Fix possible NULL-deref in evdns_cancel_request (5208544 Sebastian Hahn)
+
+PORTABILITY:
+ o Fall back to sscanf if we have no other way to implement strtoll (453317b)
+ o Build correctly on platforms without sockaddr_storage (9184563)
+ o Try to build correctly on platforms with no IPv6 support (713c254)
+ o Build on systems without AI_PASSIVE (cb92113)
+ o Fix http unit test on non-windows platforms without getaddrinfo (6092f12)
+ o Do not check for gethostbyname_r versions if we have getaddrinfo (c1260b0)
+ o Include arpa/inet.h as needed on HPUX (10c834c Harlan Stenn)
+ o Include util-internal.h as needed to build on platforms with no sockaddr_storage (bbf5515 Harlan Stenn)
+ o Check for getservbyname even if not on win32. (af08a94 Harlan Stenn)
+ o Add -D_OSF_SOURCE to fix hpux builds (0b33479 Harlan Stenn)
+ o Check for allocation failures in apply_socktype_protocol_hack (637d17a)
+ o Fix the check for multicast or broadcast addresses in evutil_check_interfaces (1a21d7b)
+ o Avoid a free(NULL) if out-of-memory in evdns_getaddrinfo. Found by Dave Hart (3417f68)
+
+DEFENSIVE PROGRAMMING:
+ o Add compile-time check for AF_UNSPEC==PF_UNSPEC (3c8f4e7)
+
+BUGS IN TESTS:
+ o Fix test.sh output on solaris (b4f89b6 Dave Hart)
+ o Make test-eof fail with a timeout if we never get an eof. (05a2c22 Harlan Stenn)
+ o Use %s with printf in test.sh (039b9bd)
+ o Add an assert to appease clang's static analyzer (b0ff7eb Sebastian Hahn)
+ o Add a forgotten return value check in the unit tests (3819b62 Sebastian Hahn)
+ o Actually send NULL request in http_bad_request_test (b693c32 Sebastian Hahn)
+ o add some (void) casts for unused variables (65707d7 Sebastian Hahn)
+ o Refactor test_getaddrinfo_async_cancel_stress() (48c44a6 Sebastian Hahn)
+ o Be nice and "handle" error return values in sample code (4bac793 Sebastian Hahn)
+ o Check return value of evbuffer_add_cb in tests (93a1abb Sebastian Hahn)
+ o Remove some dead code from dns-example.c (744c745 Sebastian Hahn)
+ o Zero a struct sockaddr_in before using it (646f9fe Sebastian Hahn)
+
+BUILD FIXES:
+ o Fix warnings about AC_LANG_PROGRAM usage (f663112 Sebastian Hahn)
+ o Skip check for zlib if we have no zlib.h (a317c06 Harlan Stenn)
+ o Fix autoconf bracket issues; make check for getaddrinfo include netdb.h (833e5e9 Harlan Stenn)
+ o Correct an AM_CFLAGS to an AM_CPPFLAGS in test/Makefile.am (9c469db Dave Hart)
+ o Fix make distcheck & installation of libevent 1 headers (b5a1f9f Dave Hart)
+ o Fix compilation under LLVM/clang with --enable-gcc-warnings (ad9ff58 Sebastian Hahn)
+
+FEATURES:
+ o Make URI parser able to tolerate nonconformant URIs. (95060b5)
+
+DOCUMENTATION:
+ o Clarify event_set_mem_functions doc (926f816)
+ o Correct evhttp_del_accept_socket documentation on whether socket is closed (f665924)
+ o fix spelling mistake in whatsnew-2.0.txt (deb2f73)
+ o Fix sample/http-server ipv6 fixes (eb692be)
+ o Comment internal headers used in sample code. (4eb281c)
+ o Be explicit about how long event loops run in event.h documentation (f95bafb)
+ o Add comment to configure.in to explain gc-sections test logic (c621359)
+ o Fix a couple of memory leaks in samples/http-server.c. Found by Dave Hart. (2e9f665)
+
+BUILD IMPROVEMENTS:
+ o Use the gcc -ffunction-segments feature to allow gc when linking with static libevent (0965c56 Dave Hart)
+ o Add configure options to disable installation, regression tests (49e9bb7 Dave Hart)
+
+
+
+Changes in version 2.0.10-stable (16 Dec 2010)
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+BUGFIXES
+ o Minor fix for IOCP shutdown handling fix (2599b2d Kelly Brock)
+ o Correctly notify the main thread when activating an event from a subthread (5beeec9)
+ o Reject overlong http requests early when Expect:100-continue is set (d23839f Constantine Verutin)
+ o EVUTIL_ASSERT: Use sizeof() to avoid "unused variable" warnings with -DNDEBUG. (b63ab17 Evan Jones)
+
+CODE CLEANUPS
+ o bufferevent-internal.h: Use the new event2/util.h header, not evutil.h (ef5e65a Evan Jones)
+ o Use relative includes instead of system includes consistently. (fbe64f2 Evan Jones)
+ o Make whitespace more consistent
+
+TESTING
+ o tests: Use new event2 headers instead of old compatibility headers. (4f33209 Evan Jones)
+
+DOCUMENTATION
+ o Document that the cpu_hint is only used on Windows with IOCP for now (57689c4)
+ o Add stuff to "whats new in 2.0" based on reading include changes since August. (18adc3f)
+
+
+Changes in 2.0.9-rc (30 Nov 2010):
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+NEW AND MODIFIED APIs
+ o Add a function to change a listener's callback. (46ee061)
+ o Make evbuffer_add_file take ev_off_t, not off_t (ac7e52d)
+ o Make rate-limits go up to SIZE_MAX/EV_SSIZE_MAX, not just INT32_MAX (2cbb1a1)
+ o Add a bufferevent_get_base function (aab49b6)
+
+MAJOR BUGFIXES
+ o Disable changelist for epoll by default because of Linux dup() bug; add an option and/or an envvar to reenable it for speed. (9531763)
+ o Fix a 100%-CPU bug where an SSL connection would sometimes never stop trying to write (1213d3d)
+ o Fix a nasty bug related to use of dup() with epoll on Linux (c281aba)
+ o Fix bugs in posix thread-id calculation when sizeof(pthread_t) != sizeof(long) (fbaf077)
+ o Fix some ints to evutil_socket_t; make tests pass on win64. (f817bfa Dimitre Piskyulev)
+ o Set _EVENT_SIZEOF_VOID_P correctly on win32 and win64 (1ae82cd Dimitre Piskyulev)
+ o Avoid double-invocation of user callback with EVUTIL_EAI_CANCEL (abf01ed)
+ o Set SO_UPDATE_ACCEPT_CONTEXT on sockets from AcceptEx so that shutdown() can work (52aa419)
+ o When closing a filtering bufferevent, clear callbacks on the underlying bufferevent (fc7b1b0)
+
+NEW AND MODIFIED HTTP APIs
+ o Add evhttp_parse_query_str to be used with evhttp_uri_parse. (2075fbc)
+ o Add evhttp_response_code to remove one more reason to include http_struct.h (22e0a9b)
+ o Define enumerators for all HTTP methods, including PATCH from RFC5789 (75a7341 Felix Nawothnig)
+ o Functions to actually use evhttp_bound_socket with/as evconnlistener. (006efa7)
+ o Add evhttp_request_get_command so code can tell GET from POST without peeking at the struct. (49f4bf7)
+ o Introduce absolute URI parsing helpers. (86dd720 Pavel Plesov)
+ o Revise evhttp_uri_parse implementation to handle more of RFC3986 (eaa5f1d)
+ o Add evhttp_connection_get_base() to get the event_base from an http connection (cd00079)
+ o Let evhttp_parse_query return -1 on failure (b1756d0)
+ o New evhttp_uri(encode|decode) functions to handle + and NUL characters right (a8148ce)
+ o Add evhttp_response_code to remove one more reason to include http_struct.h (22e0a9b)
+ o Tweak interface for allowed methods (f5b391e)
+ o Add evhttp server alias interface, correct flagging of proxy requests. (aab8c38 Christopher Davis)
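+
+ A minimal sketch of the URI-parsing helpers introduced above
+ (evhttp_uri_parse plus a couple of its accessors); only the host and
+ port are inspected here:
+
+      #include <stdio.h>
+      #include <event2/http.h>
+
+      static void show_uri(const char *uri_string)
+      {
+          struct evhttp_uri *uri = evhttp_uri_parse(uri_string);
+          const char *host;
+          if (!uri) {
+              fprintf(stderr, "not a valid URI: %s\n", uri_string);
+              return;
+          }
+          host = evhttp_uri_get_host(uri);
+          printf("host=%s port=%d\n", host ? host : "(none)",
+              evhttp_uri_get_port(uri));
+          evhttp_uri_free(uri);
+      }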
+
+HTTP BUGFIXES
+ o Add some comments to http.c and make a few functions static. (90b3ed5)
+ o Fix Content-Length when trying send more than 100GB of data (!) on an evhttp. (525da3e)
+ o Fix a bug where we would read too much data in HTTP bodies or requests. (58a1cc6)
+ o Correctly count req->body_size on http usage without Content-Length (8e342e5)
+ o Avoid missed-request bug when entire http request arrives before data is flushed (74c0e86)
+ o reset "chunked" flag when sending non-chunked reply (aa5f55f Joachim Bauch)
+ o evhttp_encode_uri encodes all reserved characters, including !$'()*+,/:=@ (2e63a60)
+ o Replace exact-version checks for HTTP/1.1 with >= or < checks (647e094)
+ o evhttp: Return 501 when we get an unrecognized method, not 400. (536311a)
+ o Don't disable reading from the HTTP connection after sending the request to be notified of connection-close in time (c76640b Felix Nawothnig)
+ o Never call evhttp_readcb while writing. (0512487)
+ o Try to fix an assertion failure related to close detection (0faaa39)
+ o Correctly detect timeouts during http connects (04861d5)
+ o Preliminary support for Continue expectation in evhttp. (fa9305f Christopher Davis)
+
+OTHER BUGFIXES
+ o Correct logic for realigning a chain in evbuffer_add (e4f34e8)
+ o Fix a minor syntax error that most compilers didn't care about (e56ff65)
+ o Fix some uses of int for socket in regress (5d389dc)
+ o Check return value for ioctlsocket on win32 (f5ad31c Trond Norbye)
+ o Fix some event_warns that should have been event_warnx (19c71e7)
+ o Fix signal handler types for win64. (b81217f)
+ o Try to clear up more size_t vs int/long issues. (598d133)
+ o Make sure IOCP evconnlistener uses virtual events. (7b40a00 Christopher Davis)
+ o Don't free evdns_request handles until after the callback is invoked (9ed30de)
+ o Fix some more cancel-related bugs in getaddrinfo_async (c7cfbcf)
+ o Make evdns_getaddrinfo_cancel threadsafe (d51b2fc)
+ o Only clear underlying callbacks when the user hasn't reset them. (1ac5b23)
+ o Fix bug in bufferevent_connect on an openssl bufferevent that already had an fd (4f228a1)
+ o Resolve an evport bug in the thread/forking test (3a67d0b)
+ o Make sure the CLOEXEC flag is set on fds we open for base notification (3ab578f)
+ o Fix IRIX build. sa_family collides with a #define in sys/socket.h on IRIX. (e874982 Kevin Bowling)
+ o If not WIN32, include <sys/socket.h> in event2/util.h. (1cd45e5 Kevin Bowling)
+ o Fix some C99-style comments to work with the xlC compiler. (c2e5e22 Kevin Bowling)
+ o Add some checks since lack of TAILQ_FOREACH doesn't imply lack of FIRST, END, NEXT, or INSERT_BEFORE. Quiet some warnings in XL C. (c4dc335 Kevin Bowling)
+ o Reworked AIX __ss_family workaround to use AC_STRUCT_MEMBER. (2e2a3d7 Kevin Bowling)
+ o Take select from <sys/select.h> when testing in autoconf. AIX build fix. (a3a9f6b Kevin Bowling)
+ o Fix snprintf related failures on IRIX. (3239073 Kevin Bowling)
+ o Remove _event_initialized(); make event_initialized() a function; make it consistent on windows and non-windows (652024b)
+ o Do not let EVLOOP_ONCE exit the loop until all deferred callbacks have run (2d5e1bd)
+ o Make EVLOOP_ONCE ignore internal events (0617a81)
+ o Possible crash fix when freeing an underlying bufferevent of an openssl bufferevent (29f7623)
+
+HTTP CLEANUPS
+ o Stop using Libevent-1 headers in regress_http (1f507d7)
+ o Modernize header usage in bench_http.c (e587069)
+ o fix signed/unsigned warnings in http.c (74a91e5)
+ o Update the HTTP regression tests to use Libevent2 apis for non-http stuff (d9ffa89)
+ o Start porting http tests to not use legacy interfaces (8505a74)
+ o Convert the rest of the http tests to be non-legacy unit tests. (9bb8239)
+ o Rename the confusing "base" static variable in regress_http.c (353402a)
+ o Stop accessing http request struct directly from in the unit tests. (0b137f4)
+ o Refactor http version parsing into a single function (a38140b)
+
+TESTING
+ o Improvements to tinytest_macros.h (ad923a1)
+ o Add a huge pile of tests for the new URI functions, and make them pass. (a5a76e6)
+ o Unit tests for evhttp_uri_set* (bc98f5e)
+ o Increase the skew tolerance to 2 seconds in thread/deferred_cb_skew (f806476 Christopher Davis)
+ o Reorder backends in test.sh to match preference order in event.c (ece974f)
+ o Add a stress test for getaddrinfo_cancel (da1bf52)
+ o Unit tests for unexpected evhttp methods. (75e3320)
+
+DOCUMENTATION
+ o Document behavior of URI parsing more thoroughly. (3a33462)
+ o Document that two bufferevent functions only work on socket bufferevents (70e1b60)
+ o add a requested docstring for event_rpcgen.CommandLine.__init__ (f1250eb)
+ o Fix a mistake in http documentation found by Julien Blache (229714d)
+ o Add a basic example of how to write a static HTTP server. (4e794d5)
+ o Document event_get_assignment (88be27d)
+ o Note that reentrant calls to libevent from logging cbs may fail badly (e431bcd)
+ o Clarify EVLOOP_* documentation to be more precise. (057a514)
+
+CLEANUPS
+ o Simplify the logic for choosing EPOLL_CTL_ADD vs EPOLL_CTL_MOD (2c66983)
+ o Rename "size" variables in win32select that were really fd counts. (b6a158c)
+ o Fix even more win64 warnings (7484df6)
+ o Fix even more win64 warnings: buffer, event_tagging, http, evdns, evrpc (545a611)
+ o Fix more win64 warnings. (34b84b9 Christopher Davis)
+ o Use the label_len local variable in evdns instead of recalculating it over and over (ba01456)
+ o Fix some irix compilation warnings spotted by Kevin Bowling (7bcace2)
+
+
+
+Changes in 2.0.8-rc (14 Oct 2010):
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+NEW APIS
+ o Add error callback to evconnlistener (c4be8d8 Simon Perreault)
+ o Add a LEV_OPT_THREADSAFE option for threadsafe evconnlisteners (127d4f2)
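+
+ A rough sketch of how the two additions above combine; not code from the
+ release, and the port number and the choice to simply close accepted
+ sockets are arbitrary examples:
+
+	#include <string.h>
+	#include <netinet/in.h>
+	#include <event2/event.h>
+	#include <event2/listener.h>
+	#include <event2/util.h>
+
+	static void accept_cb(struct evconnlistener *lev, evutil_socket_t fd,
+	    struct sockaddr *addr, int socklen, void *ctx)
+	{
+		/* a real server would wrap fd in a bufferevent here */
+		evutil_closesocket(fd);
+	}
+
+	static void error_cb(struct evconnlistener *lev, void *ctx)
+	{
+		int err = EVUTIL_SOCKET_ERROR();
+		/* log evutil_socket_error_to_string(err) and decide whether
+		 * to keep accepting or to free the listener */
+		(void)err;
+	}
+
+	static struct evconnlistener *make_listener(struct event_base *base)
+	{
+		struct sockaddr_in sin;
+		struct evconnlistener *lev;
+		memset(&sin, 0, sizeof(sin));
+		sin.sin_family = AF_INET;
+		sin.sin_port = htons(8080);	/* example port */
+		lev = evconnlistener_new_bind(base, accept_cb, NULL,
+		    LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE | LEV_OPT_THREADSAFE,
+		    -1, (struct sockaddr *)&sin, sizeof(sin));
+		if (lev)
+			evconnlistener_set_error_cb(lev, error_cb);
+		return lev;
+	}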
+
+CHANGED BEHAVIOR
+ o Correct logic on disabling underlying bufferevents when disabling a filter (ac27eb8)
+
+BUGFIXES
+ o Obey enabled status when unsuspending (040a019 Simon Perreault)
+ o Warn when using the error-prone EV_SIGNAL interface in an error-prone way. Also, fix a couple of race conditions in signal.c (720bd93)
+ o Make default signal backend fully threadsafe (95a7d41)
+ o Put internal events at highest priority (90651b3)
+ o Fix warnings in the main codebase flagged by -Wsign-compare (9c8db0, 5e4bafb, 5c214a, 6be589a, e06f514)
+ o Fix compile in kqueue.c (b395392 Sebastian Hahn)
+ o Do not search outside of the system directory for windows DLLs (d49b5e3)
+ o Fix a spurious-call bug on epoll.c (0faaee0)
+ o Send a shutdown(SHUT_WR) before closing an http connection (e0fd870 Christopher Davis)
+ o Fix warnings on mingw with gcc 4.5 (5b7a370)
+ o Fix an EINVAL on evbuffer_write_iovec on OpenSolaris. (fdc640b)
+ o Fix allocation error for IOCP listeners. Probably harmless, since struct event is big (481ef92)
+ o Make iocp/listener/error work; don't accept again if lev is disabled. (62b429a Christopher Davis)
+ o Handle rate-limiting for reading on OpenSSL bufferevents correctly. (819b171)
+ o Fix serious bugs in per-bufferevent rate-limiting code (34d64f8)
+ o Avoid spurious reads from just-created open openssl bufferevents (223ee40)
+ o Fix a case where an ssl bufferevent with CLOSE_ON_FREE didn't close its fd (93bb7d8)
+ o The corrected bufferevent filter semantics let us fix our openssl tests (34331e4)
+
+TESTING
+ o Make SSL tests cover enabling/disabling EV_READ. (a5ce9ad)
+ o Bump to the latest version of tinytest (f0bd83e)
+ o Unit tests for listener error callbacks (045eef4)
+ o New unit test for ssl bufferevents starting with connected SSLs. (02f6259)
+
+DEBUGGABILITY
+ o Make debugging output for epoll backend more comprehensive (ec2b05e)
+ o Make event.c debugging messages report fds (e119899)
+ o Make the --enable-gcc-warnings option include signed comparison warnings (d3b096c)
+
+DEADCODE REMOVAL
+ o Remove the now-useless evsig_caught and evsig_process (4858b79)
+ o Remove event_base.evsigbase; nothing used it. (38d0960)
+
+
+
+Changes in 2.0.7-rc (9 Sep 2010):
+ [Autogenerated from the Git log, sorted and cleaned by hand.]
+NEW APIS
+ o Expose a evdns_base_nameserver_sockaddr_add() function to add a nameserver by sockaddr (1952143)
+ o Add event_config_set_num_cpus_hint() for tuning win32 IOCP thread pools, etc. (2447fe8 Christopher Davis)
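+
+ A small sketch of the cpus hint; not from the release. The hint is advisory
+ and currently only matters to backends with thread pools (the win32 IOCP
+ code); the value passed is an arbitrary example:
+
+	#include <event2/event.h>
+
+	static struct event_base *base_with_cpu_hint(int ncpus)
+	{
+		struct event_config *cfg = event_config_new();
+		struct event_base *base;
+		if (!cfg)
+			return NULL;
+		event_config_set_num_cpus_hint(cfg, ncpus);
+		base = event_base_new_with_config(cfg);
+		event_config_free(cfg);
+		return base;
+	}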
+
+BUGFIXES
+ o Fix a nasty dangling-event bug when using rate-limiting groups (0bffe43)
+ o Clean up syntax on TAILQ_ENTRY() usage to build correctly with recent MSVC (60433a0 Gilad Benjamini)
+ o Make definition of WIN32_LEAN_AND_MEAN in event.h conditional (3920172 Gilad Benjamini)
+ o Correctly detect failure to delete bufferevent read-timeout event (da6e7cd)
+ o Set close-on-exec bit for file descriptors created by dns subsystem (d0b8843)
+ o Fix kqueue correctness test on x86_64 (6123d12)
+ o Detect events with no ev_base; warn instead of crashing (f1074b7)
+ o Fix an issue with forking and signal socketpairs in select/poll backends (d61b2f3)
+ o Stop using global arrays to implement the EVUTIL_ctype functions (1fdec20)
+ o On windows, make lock/thread function tables static (5de2bcb)
+ o Close th_notify_fds and open a new pair on reinit (495ed66)
+ o Declare signal handler function as "__cdecl" on Windows (f0056d0)
+ o Use the _func() replacements for open, fstat, etc in evutil.c on win32 (e50c0fc)
+ o Only process up to MAX_DEFERRED deferred_cbs at a time (17a14f1 Christopher Davis)
+
+THREADING BUGFIXES
+ o Avoid deadlock when activating signals (970e6ad)
+ o Add a condition variable backend, with implementations for pthreads and win32 (d4977b5)
+ o Use conditions instead of current_event_lock to fix a deadlock (e0972c2)
+ o Fix logic error in win32 TRY_LOCK that caused problems with rate-limiting (4c32b9d)
+ o Avoid needlessly calling evthread_notify_base() when the loop is not running (c7a06bf)
+ o Minimize calls to base_notify implementation functions, thereby avoiding needless syscalls (4632b78)
+
+IOCP BUGFIXES
+ o IOCP-related evbuffer fixes (03afa20 Christopher Davis)
+ o Stop IOCP when freeing the event_base (d844242 Christopher Davis)
+ o Some IOCP bufferevent tweaks (76f7e7a Christopher Davis)
+
+TESTS
+ o Make the regress_pthread.c tests work on windows with current test APIs (d74ae38)
+ o Add a unit test for conditions (5fb1095)
+ o Allow more than one copy of regression tests to run at once (a97320a)
+ o Fix event_del(0) instance in bench.c (b0f284c Shuo Chen)
+ o Fix a few memory leaks in the tests (1115366)
+ o IOCP-related unit test tweaks (499452f Christopher Davis)
+ o Improve testing of when thread-notification occurs (ce85280)
+
+BUILD AND DISTRIBUTION
+ o Add pkgconfig files for libevent_{openssl,pthreads} (ebcb1f0)
+ o Change include order in Makefile.nmake (4022b28)
+ o Make include/event2/event-config.h not included in source dist (a4af9be)
+ o Honor NDEBUG; build without warnings with NDEBUG; make NDEBUG always-off in unit test code (743f866)
+ o Declare evkeyvalq and event_list even if event_struct.h comes before sys/queue.h (d3ceca8)
+ o Move evkeyvalq into a separate header for evhttp_parse_query users (ca9048f)
+ o Prefer autoreconf -ivf to manual autogen.sh (7ea8e89)
+
+CLEANUP
+ o Completely remove the (mostly-removed) obsolete thread functions (3808168)
+ o Rename regress_pthread.c to regress_thread.c (041989f)
+ o Make defer-internal.h use lock macros, not direct calls to lock fns (5218d2a)
+
+DOCUMENTATION
+ o Document that DNS_NO_SEARCH is an obsolete alias for DNS_QUERY_NO_SEARCH (33200e7)
+ o Update the whatsnew-2.0.txt document (4991669)
+
+
+
+Changes in 2.0.6-rc (6 Aug 2010):
+ [Autogenerated from the Git log, sorted by hand.]
+DOCUMENTATION
+ o Document a change in the semantics of event_get_struct_event_size() (e21f5d1)
+ o Add a comment to describe our plan for library versioning (9659ece)
+ o Fix sentence fragment in docs for event_get_struct_event_size() (7b259b6)
+
+NEW FEATURES AND INTERFACE CHANGES
+ o Remove the obsolete evthread interfaces (c5bab56)
+ o Let evhttp_send_error infer the right error reasons (3990669)
+ o Add a function to retrieve the other side of a bufferevent pair (17a8e2d)
+ o Add bufferevent_lock()/bufferevent_unlock(); see the example after this list. (215e629)
+ o Stop asserting when asked for an (unsupported) TCP dns port. Just return NULL. (7e87a59)
+ o Replace (unused,always 0) is_tcp argument to evdns_add_server_port*() with flags (e1c1167)
+ o Constify a couple of arguments to evdns_server_request_add_*_reply (cc2379d)
+ o Add an interface to expose min_share in ratelimiting groups (6ae53d6)
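+
+ A rough sketch of the pair-partner and lock/unlock additions above; not
+ code from the release, the "ping" payload is an arbitrary example, and
+ error handling is minimal:
+
+	#include <event2/event.h>
+	#include <event2/bufferevent.h>
+
+	static void pair_demo(struct event_base *base)
+	{
+		struct bufferevent *pair[2];
+		if (bufferevent_pair_new(base, BEV_OPT_THREADSAFE, pair) < 0)
+			return;
+		/* bufferevent_pair_get_partner(pair[0]) returns pair[1] */
+		bufferevent_lock(pair[0]);
+		bufferevent_write(pair[0], "ping", 4);
+		bufferevent_unlock(pair[0]);
+		bufferevent_free(pair[0]);
+		bufferevent_free(pair[1]);
+	}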
+
+BUGFIXES
+ o Avoid event_del on uninitialized event in event_base_free (6d19510)
+ o Add some missing includes to fix Linux build again (75701e8)
+ o Avoid close of uninitialized socket in evbuffer unit test (bda21e7)
+ o Correctly recognize .255 addresses as link-local when looking for interfaces (8c3452b)
+ o If no evdns request can be launched, return NULL, not a handle (b14f151)
+ o Use generic win32 interfaces, not ASCII-only ones, where possible. (899b0a3)
+ o Fix the default HTTP error template (06bd056 Felix Nawothnig)
+ o Close the file in evutil_read_file whether there's an error or not. (0798dd1 Pierre Phaneuf)
+ o Fix possible nullptr dereference in evhttp_send_reply_end() (29b2e23 Felix Nawothnig)
+ o never let bufferevent_rlim functions return negative (0859870)
+ o Make sample/hello_world work on windows (d89fdba)
+ o Fix a deadlock related to event-base notification. Diagnosed by Zhou Li, Avi Bab, and Scott Lamb. (17522d2)
+ o Possible fix to 100% cpu usage with epoll and openssl (cf249e7 Mike Smellie)
+ o Don't race when calling event_active/event_add on a running signal event (fc5e0a2)
+ o Suppress a spurious EPERM warning in epoll.c (e73cbde)
+ o Fix wrong size calculation of iovec buffers when exact=1 (65abdc2 niks)
+ o Change bufferevent_openssl::do_write so it doesn't call SSL_write with a 0 length buffer (c991317 Mike Smellie)
+ o Fixed compilation of sample/le-proxy.c on win32 (13b912e Trond Norbye)
+ o Fix rate-limit calculation on openssl bufferevents. (009f300)
+ o Remember to initialize timeout events for bufferevent_async (de1f5d6 Christopher Davis)
+
+BUILD AND DISTRIBUTION CHANGES
+ o Test the unlocked-deferred callback case of bufferevents (dfb75ab)
+ o Remove the now-unusable EVTHREAD_LOCK/UNLOCK constants (fdfc3fc)
+ o Use -Wlogical-op on gcc 4.5 or higher (d14bb92)
+ o Add the libtool-generated /m4/* stuff to .gitignore (c21c663)
+ o Remove some automake-generated files from version control. (9b14911)
+ o Have autogen.sh pass --force-missing to automake (8a44062)
+ o Set library version for libevent_pthreads correctly (b2d7440)
+ o Really only add libevent_core.la to LIBADD on mingw (1425003 Sebastian Hahn)
+ o Build more cleanly with NetBSDs that dislike toupper(char) (42a8c71)
+ o Fix unit tests with -DUSE_DEBUG enabled (28f31a4)
+ o Fix evdns build with -DUNICODE (5fa30d2)
+ o Move event-config.h to include/event2 (ec347b9)
+
+TESTING
+ o Add options to test-ratelim.c to check its results (2b44dcc)
+ o Make test-ratelim clean up after itself better. (b5bfc44)
+ o Remove the now-obsolete setup_test() and cleanup_test() functions (e73f1d7)
+ o Remove all non-error prints from test/regress.c (8bc1e3d)
+ o Make test.sh exit with nonzero status if tests fail (faf2a04)
+ o Have the unit tests report errors from test.sh (3689bd2)
+ o Fix logic in correcting high values from FIONREAD (3467f2f)
+ o Add test for behavior on remote socket close (44d57ee)
+ o Unit test for event_get_struct_event_size() (7510aac)
+ o Make test/test.sh call test-changelist (7c92691)
+ o Fix badly-behaved subtest of dns/bufferevent_connect_hostname (840a72f Joachim Bauch)
+ o Add option to test-ratelim to test min_share (42f6b62)
+ o Fix an assertion bug in test-ratelim (b2c6202)
+ o Make tests quieter on local dns resolver failure (e996b3d)
+ o Increase the tolerance in our unit tests for sloppy clocks. (170ffd2)
+ o Use AF_INET socketpair to test sendfile on Solaris (9b60209)
+ o Make test-changelist count cpu usage right on win32 (ea1ea3d)
+
+INTERNALS, PERFORMANCE, AND CODE CLEANUPS
+ o Mark the event_err() functions as __attribute__((noreturn)) (33bbbed)
+ o Do not check that event_base is set in EVBASE_ACQUIRE_LOCK (218a3c3)
+ o Replace (safe) use of strcpy with memcpy to appease OpenBSD (caca2f4)
+ o Remove some dead assignments (47c5dfb)
+ o Fix a pedantic gcc 4.4 warning in event2/event.h (276e7ee)
+ o Drain th_notify_fd[0] more bytes at a time. (a5bc15b)
+ o Tidy up the code in evthread_make_base_notifiable a little (61e1eee)
+ o Pass flags to fcntl(F_SETFL) and fcntl(F_SETFD) as int, not long (7c2dea1)
+ o Remove unused variables in test/test-changelist.c (b00d4c0)
+ o Fix whitespace. (cb927a5)
+ o Improve error message for failed epoll to make debugging easier. (9e725f7)
+ o Turn our socketpair() replacement into its own function (57b30cd)
+
+
+
+Changes in 2.0.5-beta (10 May 2010):
+ [Autogenerated from the Git log, sorted by hand.]
+DOCUMENTATION
+ o Update all our copyright notices to say "2010" (17efc1c)
+ o Add Christopher Clark and Maxim Yegorushkin to the LICENSE file (38b7b57)
+ o Clarify Christopher Clark's status as writer of original ht code. (78772c3)
+ o Try to comment some of the event code more (cdd4c49)
+ o Add a few more evmap/changelist comments (c247adc)
+ o Add a comment to explain why evdns_request is now separate from request (ceefbe8)
+ o Document evutil_secure_rng_init() and evutil_secure_rng_add_bytes() (a5bf43a)
+ o Stop distributing and installing manpages: they were too inaccurate (7731ec8)
+
+NEW FEATURES AND INTERFACE CHANGES
+ o Remove signal_assign() and signal_new() macros. (2fac0f7)
+ o Make evdns use the regular logging system by default (b2f2be6)
+ o Allow evbuffer_read() to split across more than 2 iovecs (e470ad3)
+ o Functions to manipulate existing rate limiting groups. (ee41aca)
+ o Functions to track the total bytes sent over a rate limit group. (fb366c1)
+ o Detect and refuse reentrant event_base_loop() calls (b557b17)
+ o Limit the maximum number of events on each socket to 65535 (819f949)
+ o Add evbuffer_copyout to copy data from an evbuffer without draining it; see the example after this list. (eb86c8c)
+ o Expose the request and reply members of rpc_req_generic() (07edf78 Shuo Chen)
+ o Add void* arguments to request_new and reply_new evrpc hooks (755fbf1 Shuo Chen)
+ o Seed the RNG using sysctl() as well as /dev/urandom (71fc3eb)
+ o Make evutil_secure_rng_init() work even with builtin arc4random (f980716)
+ o Report DNS error when lookup fails during bufferevent_socket_connect_hostname. (0ef4070 Christopher Davis)
+ o Release locks on bufferevents while executing callbacks (a5208fe Joachim Bauch)
+ o Make debug mode catch mixed ET and non-ET events on an fd (cb67074)
+ o Catch attempts to enable debug_mode too late (9ecf0d4)
+ o Refuse null keys in evhttp_parse_query() (953e229 Frank Denis)
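+
+ A small sketch of evbuffer_copyout(); not from the release, and the 8-byte
+ "header" is an arbitrary example:
+
+	#include <event2/buffer.h>
+
+	static void peek_header(struct evbuffer *buf)
+	{
+		char hdr[8];
+		ev_ssize_t n = evbuffer_copyout(buf, hdr, sizeof(hdr));
+		if (n > 0) {
+			/* inspect hdr[0..n-1]; unlike evbuffer_remove(),
+			 * the bytes are still in buf afterwards */
+		}
+	}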
+
+BUGFIXES
+ o Avoid a spurious close(-1) on Linux (70a44b6)
+ o Do not close(-1) when freeing an uninitialized socket bufferevent (b34abf3)
+ o Free evdns_base->req_heads on evdns_base_free (859af67)
+ o Avoid an (untriggerable so far) crash bug in bufferevent_free() (0cf1431)
+ o Set mem_offset for every bufferevent type (657d1b6)
+ o Fix infrequent memory leak in bufferevent_init_common(). (8398641 Jardel Weyrich)
+ o Make evutil_signal_active() match declaration. (e1e703d Patrick Galbraith)
+ o Fix minheap code to use replacement malloc functions (a527618)
+ o Fix a free(NULL) in minheap-internal.h (6f20492)
+ o Fix critical bug in evbuffer_write when writev is not available (cda56ab)
+ o Make the no_iovecs case of write_atmost compile (8e227b0)
+ o Fix a memory leak when appending/prepending to a buffer with unused space. (45068a3)
+ o Clean up a mistake in pointer manipulation in evbuffer_remove (28bfed4 Christopher Davis)
+ o Always round up when there's a fractional number of msecs. (8f9e60c Christopher Davis)
+ o Fix compiler warnings under WIN32 (d469c50 Giuseppe Scrivano)
+ o Clean up properly when adding a signal handler fails. (b84b598 Gilad Benjamini)
+ o Ensure that evdns_request is a persistent handle. (15bb82d Christopher Davis)
+ o Free search state when finished searching to avoid an infinite loop. (a625840 Christopher Davis)
+ o Assert for valid requests as necessary. (67072f3 Christopher Davis)
+ o do not leak the request object on persistent connections (9d8edf2)
+ o Make evdns logging threadsafe (b1c7950)
+ o Fix a couple of bugs in the BSD sysctl arc4seed logic (a47a4b7)
+ o Remove one last bug in last_with_datap logic. Found with valgrind (d49b92a)
+ o fix a leak when unpausing evrpc requests (94ee125)
+ o Fix a memory leak when unmarshalling RPC object arrays (f6ab2a2)
+ o Fix compilation when openssl support is disabled (40c301b)
+ o Allow empty reason line in HTTP status (739e688 Pierre Phaneuf)
+ o Fix a compile warning introduced in 739e688 (bd1ed5f Sebastian Hahn)
+ o Fix nonstandard TAILQ_FOREACH_REVERSE() definition (71afc52 Frank Denis)
+ o Try /proc on Linux as entropy fallback; use sysctl as last resort (20fda29)
+ o Fix symbol conflict between mm_*() macros and libmm (99e50e9)
+ o Fix some crazy macro mistakes in arc4random.c (90d4225)
+ o Make evbuffer_add_file() work on windows (dcdae6b)
+ o Fix unused-variable warning when building with threads disabled (ad811cd)
+ o Numerous opensolaris compilation fixes (c44de06)
+ o Fix getaddrinfo with protocol unset on Solaris 9. Found by Dagobert Michelsen (2cf2a28)
+ o Fix another nasty solaris getaddrinfo() behavior (3557071)
+ o Define _REENTRANT as needed on Solaris, elsewhere (c1cd32a)
+ o Fix some autoconf issues on OpenBSD (7c519df)
+
+BUILD AND DISTRIBUTION CHANGES
+ o Distribute libevent.pc.in, not libevent.pc (22aff04)
+ o Avoid errors in evutil.c when building with _UNICODE defined (b677032 Brodie Thiesfield)
+ o Avoid errors in http.c when building with VC 2003 .NET (13e4f3b Brodie Thiesfield)
+ o Support the standard 'make check' target in place of 'make verify' (426c8fb)
+ o Remove redundant stuff from EXTRA_DIST (b660edf)
+ o Switch to using AM conditionals in place of AC_LIBOBJ (2e898f5)
+ o Remove an orphaned RELEASE flag in Makefile.am (0794b0d)
+ o Give a better warning for bad automake versions. (77c917d)
+ o Use dist_bin_SCRIPTS, not EXTRA_DIST, to distribute scripts (9eb2fd7)
+ o Never test for select() on windows (3eb044d Trond Norbye)
+ o Do not inhibit automake dependencies generation (10c4c90 Giuseppe Scrivano)
+ o Create shared libraries under Windows (3cbca86 Giuseppe Scrivano)
+ o Add ctags/etags files to .gitignore (0861d17)
+ o Only specify -no-undefined on mingw (25433b9)
+ o Only add libevent_core.la to LIBADD on mingw (fdc6297)
+
+TESTING
+ o Get bench_http to work on Windows; add a switch to enable IOCP. (4ac38a5 Christopher Davis)
+ o VC has no getopt(), so do without in bench_http. (1273d2f Christopher Davis)
+ o Fix an obnoxious typo in the bufferevent_timeout_filter test (0d047c3)
+ o Fix a write of uninitialized RAM in regression tests (68dc742)
+ o Fix some memory leaks in the unit tests (274a7bd)
+ o Make 'main/many_events' test 70 fds, not 64. (33874b0)
+ o Unit-test every evbuffer_add_file() implementation. (06a4443)
+ o Add more unit tests for evbuffer_expand (8c83e99)
+ o Test another case of evbuffer_prepend (1234b95)
+ o Fix a possible double-free bug in SSL bufferevents with CLOSE_ON_FREE (7501895)
+ o Add dns/search_cancel unit test. (39b870b Christopher Davis)
+ o Make http_base_test stop leaking an event_base. (96730d3)
+ o Detect broken unsetenv at unit-test runtime (f37cd4c)
+ o Implement regress_make_tempfile on win32 to test evbuffer_add_file (b4f12a1)
+ o add more (currently skipped) add_file tests on win32 (05de45d)
+ o Fix bench_http build on win32. (384d124)
+ o Make unit test for add_file able to tell "error" from "done" (88a543f)
+ o Make test for bufferevent_connect_hostname system-neutral (f89168e)
+ o Make test.sh support mingw/msys on win32 (0ee6f6c)
+ o Fix test.sh on freebsd (3d9e05b)
+
+INTERNALS, PERFORMANCE, AND CODE CLEANUPS
+ o Improve the speed of evbuffer_readln() (cc1600a)
+ o more whitespace normalization (2c2618d)
+ o Revise evbuffer to add last_with_data (2a6d2a1)
+ o Use last_with_data in place of previous_to_last (c8ac57f)
+ o Remove previous_to_last from evbuffer (6f47bd1)
+ o Fix last_with_data compilation on windows (1e7b986)
+ o Add some glass-box tests for the last_with_data code. (17da042)
+ o Improve robustness for refcounting (f1bc125)
+ o Remove a needless min_heap_shift_up_() call (7204b91)
+ o Increase MIN_BUFFER_SIZE to 512 (1024 on 64-bit) (2014ae4)
+ o Do not use evbuffer_expand() to add the first chain to a buffer (5c0ebb3)
+ o Make evbuffer_prepend handle empty buffers better (c87272b)
+ o Replace last_with_data with a slightly smarter version (b7442f8)
+ o Turn the increasingly complex *_CHAIN() macros into functions (96865c4)
+ o Rewrite evbuffer_expand and its users (d5ebcf3)
+ o Add evutil_tv_to_msec for safe conversion of timevals to milliseconds. (850c3ff Christopher Davis)
+ o Initialize last_with_datap correctly in evbuffer_overlapped (a0983b6)
+ o Replace EVUTIL_CLOSESOCKET macro with a function (899c1dc Sebastian Sjöberg)
+ o Move domain search state to evdns_request. (beaa14a Christopher Davis)
+ o Remove redundant checks for lock!=NULL before calling EVLOCK_LOCK (50ec59f)
+ o Rename current_base symbol to event_global_current_base_ (c16e684)
+ o Fix whitespace in evutil.c (935e150)
+ o Replace users of "int fd" with "evutil_socket_t fd" in portable code (c7cf6f0)
+
+
+
+Changes in 2.0.4-alpha (28 Feb 2010):
+ [Autogenerated from the Git log, sorted by hand.]
+DOCUMENTATION
+ o Add stub header for 2.0.4-alpha changelog. (94d0065)
+ o Improve the README with more information and links. (0b42726)
+ o Add more people who wrote patches to the acknowledgments (0af10d5)
+ o Add a warning about the use of event_initialized. (f32b575)
+ o Add a LICENSE file so people can find our license easily (7067006)
+ o Add a new "hello world" sample program (becb9f9)
+ o Clarify status of example programs (d60a1bd)
+ o Update time-test.c to use event2 (f4190bf)
+ o Add the arc4random.c license to the LICENSE file. (e15e1e9)
+
+NEW FEATURES AND INTERFACE CHANGES
+ o Improved optional lock debugging. (0cd3bb9)
+ o Rate-limiting for bufferevents; group and individual limits are supported. (737c9cd)
+ o Testing code for bufferevent rate-limiting. (f0c0124)
+ o Make the initial nameserver probe timeout configurable. (1e56a32)
+ o Revise the locking API: deprecate the old locking callbacks and add trylock. (347952f)
+ o Do not make bufferevent_setfd implicitly disable EV_READ and EV_WRITE. (8274379)
+ o Do not ignore bufferevent_enable(EV_READ) before bufferevent_connect(). (4a5b534)
+ o Introduced evutil_make_socket_closeonexec() to preserve fd flags for F_SETFD. (d0939d2 Jardel Weyrich)
+ o evdns_getaddrinfo() now supports the /etc/hosts file. (72dd666)
+ o Look at the proper /etc/hosts file on windows. (66c02c7)
+ o Allow http connections to use evdns for hostname lookups. (c698b77)
+ o Changelist code to defer event changes until just before dispatch (27308aa)
+ o do not use a separate function to assign the evdns base; instead assign it via evhttp_connection_base_new(), a new function introduced in 2.0 (5032e52)
+ o Functions to access more fields of struct event. (0683950)
+ o Make kqueue use changelists. (45e5ae3)
+ o Remove kqueue->pend_changes. (3225dfb)
+ o Minimize epoll_ctl calls by using changelist (c8c6a89)
+ o Add support for a "debug mode" to try to catch common errors. (cd17c3a)
+ o Note a missing ratelim function (361da8f)
+ o Add ev_[u]intptr_t to include/event2/util.h (1fa4c81)
+ o const-ify a few more functions in event.h (d38a7a1)
+ o Deprecate EVENT_FD and EVENT_SIGNAL. (f6b2694)
+ o Remove EVUTIL_CHECK_FMT. (6c21c89)
+ o Add EV_*_MAX macros to event2/util.h to expose limits for ev_* types. (aba1fff)
+ o Functions to view and manipulate rate-limiting buckets. (85047a6)
+ o Add the rest of the integer limits, and add a test for them. (60742d5)
+ o Remove the 'flags' argument from evdns_base_set_option() (1dd7e6d)
+ o Add an arc4random implementation for use by evdns (d4de062)
+ o Use off_t for the length parameter of evbuffer_add_file (3fe60fd)
+ o Construct Windows locks using InitializeCriticalSectionAndSpinCount (32c6f1b)
+ o Expose view of current rate limit as constrained by group limit (162ce8a)
+ o Provide consistent, tested semantics for bufferevent timeouts (d328829)
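+
+ The timeout semantics referred to in the last entry can be exercised
+ roughly as below; not code from the release, and the 30s/10s values are
+ arbitrary. When a timeout fires, the event callback receives
+ BEV_EVENT_TIMEOUT together with BEV_EVENT_READING or BEV_EVENT_WRITING:
+
+	#include <sys/time.h>
+	#include <event2/bufferevent.h>
+
+	static void set_io_timeouts(struct bufferevent *bev)
+	{
+		struct timeval rto = {30, 0}, wto = {10, 0};
+		/* passing NULL for either argument is meant to leave that
+		 * direction without a timeout */
+		bufferevent_set_timeouts(bev, &rto, &wto);
+	}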
+
+BUGFIXES AND TESTS
+ o Tolerate code that returns from a fatal_cb. (91fe23f)
+ o Parenthesize macro arguments more aggressively (07e9e9b)
+ o Fix memory-leak of signal handler array with kqueue. (e1ffbb8)
+ o Stop passing EVTHREAD_READ and EVTHREAD_WRITE to non-rw locks. (76cd2b7)
+ o Fix two use-after-free bugs in unit tests spotted by lock debugging (d84d838)
+ o Fix a locking bug in event_base_loop() (da1718b)
+ o Fix an evdns lock violation. (2df1f82 Zhuang Yuyao)
+ o Valgrind fix: Clear struct kevent before checking for OSX bug. (56771a3 William Ahern)
+ o Fix up evthread compilation on windows (bd6f1ba Roman Puls)
+ o Fix regress_iocp.c usage of old lock allocation macros. (31687b4 unknown)
+ o Update nmake makefile to build evthread.c (b62d979 unknown)
+ o Fix a crash when reading badly formatted resolv.conf; from Yasuoka Masahiko (6c7c579 Yasuoka Masahiko)
+ o Fix a snow leopard compile warning in the unit tests. (7ae9445)
+ o Fix compile on Snow Leopard with gcc warnings enabled (70cdfe4 Sebastian Hahn)
+ o Only define _GNU_SOURCE if it is not already defined. (ea6b1df Joachim Bauch)
+ o Update sample/signal-test.c to use newer APIs and not leak. (f6430ac Evan Jones)
+ o Fix a segfault when writing a very fragmented evbuffer onto an SSL (a6adeca Joachim Bauch)
+ o Fix a segfault when freeing SSL bufferevents in an unusual order (a773df5 Joachim Bauch)
+ o Drop install-sh from our git repo: a mismatched version could break "make dist" (6799527)
+ o Set all instances of the version number correctly. (5a112d3)
+ o Fix a few locking issues on windows. (c51bb3c unknown)
+ o Use evutil_socket_t, not int, when logging socket errors. (292467c)
+ o Fix up behavior of never-deferred callbacks a little (390e056)
+ o Replace some cases of uint32_t with ev_uint32_t. (a47d88d)
+ o Fix compilation of devpoll.c by adding missing thread includes. (fee2c77 Dagobert Michelsen)
+ o Make evutil_make_socket_nonblocking() leave any other flags alone. (4c8b7cd Jardel Weyrich)
+ o Fix an fd leak in evconnlistener_new_bind(). (24fb502 Jardel Weyrich)
+ o Fix a bogus free in evutil_new_addrinfo() (0d64051 Jardel Weyrich)
+ o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (4df7dbc Jardel Weyrich)
+ o Fix the code that allowed DNS options to not end with : (ee4953f)
+ o Fix crash bugs when a bufferevent's eventcb is not set. (2e8eeea)
+ o Fix test-ratelim compilation on Linux. (885b427)
+ o Fix compilation of rate-limiting code on win32. (165d30e)
+ o Eradicated the last free() call. Let mm_free() take care of deallocation. (0546ce1 Jardel Weyrich)
+ o Fix byte counts when mixing deferred and non-deferred evbuffer callbacks. (29151e6)
+ o Fixed a memory leak on windows threads implementation. The CRITICAL_SECTION was not being free'd in evthread_win32_lock_free(). (2f33e00 Jardel Weyrich)
+ o Fixed a fd leak in start_accepting(), plus cosmetic changes (4367a33 Jardel Weyrich)
+ o Improved error handling in evconnlistener_new_async(). Also keeping the fd open because it is not opened by this function, so the caller is responsible for closing it. Additionally, since evconnlistener_new_bind() creates a socket and passes it to the function above, it required error checking to close the same socket. (fec66f9 Jardel Weyrich)
+ o Don't use a bind address for nameservers on loopback (8d4aaf9)
+ o Fix compilation of rate-limit code when threading support is disabled (97a8c79)
+ o Detect setenv/unsetenv; skip main/base_environ test if we can't fake them. (7296971)
+ o Check more internal event_add() calls for failure (ff3f6cd)
+ o Fix windows and msvc build (5c7a7bc)
+ o Call event_debug_unassign on internal events (a19b4a0)
+ o Try to fix a warning in hash_debug_entry (137f2c6)
+ o Fix a dumb typo in ev_intptr_t definitions. (27c9a40)
+ o do not fail while sending on http connections the client closed. (93d7369)
+ o make evhttp_send() safe against terminated connections, too (3978180)
+ o Make Libevent 1.4.12 build on win32 with Unicode enabled. (000a33e Brodie Thiesfield)
+ o Fix some additional -DUNICODE issues on win32. (a7a9431)
+ o Add a check to make sure our EVUTIL_AI flags do not conflict with the native ones (c18490e)
+ o Always use our own gai_strerror() replacement. (6810bdb)
+ o Make RNG work when we have arc4random() but not arc4random_buf() (4ec8fea)
+ o validate close cb on server when client connection closes (2f782af)
+ o Fix two unlocked reads in evbuffer. (7116bf2)
+ o When working without a current event base, don't try to use IOCP listeners (cb52838)
+ o Fix getpid() usage on Windows (ff2a134)
+ o Add a unit test for secure rng. (48a29b6)
+ o Add some headers to fix freebsd compilation (b72be50)
+ o When connect() succeeds immediately, don't invoke the callback immediately. (7515de9)
+ o Suspend read/write on bufferevents during hostname lookup (db08f64)
+ o Make bufferevent_free() clear all callbacks immediately. (b2fbeb3)
+ o Fix some race conditions in persistent events and event_reinit (e2642f0)
+ o Fix a bug in resetting timeouts on persistent events when IO triggers. (38ec0a7)
+ o Add a test for timeouts on filtering bufferevents. (c02bfe1)
+ o Add test for periodic timers that get activated for other reasons (8fcb7a1)
+ o Use new timeval diff comparison function in bufferevent test (f3dfe46)
+ o Delete stack-alloced event in new unit test before returning. (7ffd387)
+ o Fix mingw compilation (23170a6)
+ o Try to define a sane _EVENT_SIZEOF_SIZE_T for msvc compilation (1e14f82)
+ o Fix arc4random compilation on MSVC. (98edb89)
+ o deal with connect() failing immediately (7bc48bf)
+ o Small cleanups on freebsd-connect-refused patch. (57b7248)
+
+BUILD AND DISTRIBUTION CHANGES
+ o Remove the contents of WIN32-Prj as unmaintained. (c69d5a5)
+ o Allow the user to redirect the verbose output of test/test.sh to a file (c382de6)
+ o Allow test.sh to be run as ./test/test.sh (7dfbe94)
+ o Never believe that we have pthreads on win32, even if gcc thinks we do. (78ed097)
+ o Make it compile under gcc --std=c89. (e2ca403)
+ o Fix a number of warnings from gcc -pedantic (918e9c5)
+ o Add the msvc-generated .lib files to .gitignore. (e244a2e)
+ o Add the "compile" script to gitignore. (1ba6bed)
+
+INTERNALS AND CODE CLEANUPS
+ o Add a .gitignore file. (ba34071)
+ o New EVTHREAD_TRY_LOCK function to try to grab a lock. (689fc09)
+ o Add the ability to mark some buffer callbacks as never-deferred. (438f9ed)
+ o Refactor our 'suspend operation' logic on bufferevents. (0d744aa)
+ o Simplify the read high-watermark checking. (5846bf6)
+ o Improve readability of evutil_unparse_protoname() (5a43df8 Jardel Weyrich)
+ o Expose our cached gettimeofday value with a new interface (47854a8)
+ o Whitespace fixes in test.sh (0b151a9)
+ o Enable branch-prediction hints with EVUTIL_UNLIKELY. (eaaf27f)
+ o Refactor code from evdns into a new internal "read a file" function. (0f7144f)
+ o Cosmetic changes in evconnlistener_new(), new_accepting_socket(), accepted_socket_invoke_user_cb() and iocp_listener_enable(). (510ab6b Jardel Weyrich)
+ o Add unit-test for bad_request bug fixed in 1.4 recently. (6cc79c6 Pavel Plesov)
+ o Add a comment on evthread_enable_lock_debuging. (b9f43b2)
+ o Fix test.sh on shells without echo -n (94131e9)
+ o More unit tests for getaddrinfo_async: v4timeout and cancel. (a334b31)
+ o Make http use evconnlistener. (ec34533)
+ o move dns utility functions into a separate file so that we can use them for http testing (b822639)
+ o add a test for evhttp_connection_base_new with a dns_base (26714ca)
+ o forgot to add void to test function (78a50fe)
+ o Add a forgotten header (changelist-internal.h) (4b9f307)
+ o Remove some commented-out code in evutil (26e1b6f)
+ o Remove a needless include of rpc_compat.h (70a4a3e)
+ o Use less memory for each entry in a hashtable (a66e947)
+ o Try to untangle the logic in server_port_flush(). (439aea0)
+ o Use ev_[u]intptr_t types in place of [u]intptr_t (cef61a2)
+ o Reduce windows header includes in our own headers. (da6135e)
+ o clean up terminate_chunked test (e8a9782)
+ o Increment the submicro version number. (63e868e)
+ o Update event-config.h version number to match configure.in (aae7db5)
+ o Clean up formatting: Disallow space-before-tab. (8fdf09c)
+ o Clean up formatting: use tabs, not 8-spaces, to indent. (e5bbd40)
+ o Clean up formatting: remove trailing spaces (e5cf987)
+ o Clean up formatting: function/keyword spacing consistency. (4faeaea)
+
+
+
+Changes in 2.0.3-alpha (20 Nov 2009):
+ o Add new code to support SSL/TLS on bufferevents, using the OpenSSL library (where available).
+ o Fix a bug where we didn't allocate enough memory in event_get_supported_methods().
+ o Avoid segfault during failed allocation of locked evdns_base. (Found by Rocco Carbone.)
+ o Export new evutil_ascii_* functions to perform locale-independent character type operations.
+ o Try to compile better with MSVC: patches from Brodie Thiesfield
+ o New evconnlistener_get_fd function to expose a listener's associated socket.
+ o Expose an ev_socklen_t type for consistent use across platforms.
+ o Make bufferevent_socket_connect() work when the original fd was -1.
+ o Fix a bug in bufferevent_socket_connect() when the connection succeeds too quickly.
+ o Export an evutil_sockaddr_cmp() to compare to sockaddr objects for equality.
+ o Add a bufferevent_get_enabled() to tell what a bufferevent has been configured to do.
+ o Add an evbuffer_search_eol() function to locate the end of a line nondestructively.
+ o Add an evbuffer_search_range() function to search a bounded range of a buffer.
+ o Fix a rare crash bug in evdns.
+ o Have bufferevent_socket_connect() with no arguments put a bufferevent into connecting mode.
+ o Support sendfile on Solaris: patch from Caitlin Mercer.
+ o New functions to explicitly reference a socket used by an evhttp object. Patches from David Reiss.
+ o When we send a BEV_EVENT_CONNECTED to indicate connected status, we no longer invoke the write callback as well unless we actually wrote data too.
+ o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov.
+ o Do not detect whether we have monotonic clock support every time a new event base is created: instead do it only once. Patch taken from Chromium.
+ o Do not allocate the maximum event queue for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 374K per epoll-based event_base. Resolves bug 2839240.
+ o Treat an event with a negative fd as valid but untriggerable by Libevent. This is useful for applications that want to manually activate events.
+ o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h
+ o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec
+ o New event_base_got_exit() and event_base_got_break() functions to tell whether an event loop exited because of an event_base_loopexit() or an event_base_loopbreak(). Patch from Ka-Hing Cheung.
+ o When adding or deleting an event from a non-main thread, only wake up the main thread when its behavior actually needs to change.
+ o Fix some bugs when using the old evdns interfaces to initialize the evdns module.
+ o Detect errors during bufferevent_connect(). Patch from Christopher Davis.
+ o Fix compilation for listener.h for C++ - missing extern "C". Patch from Ferenc Szalai.
+ o Make the event_base_loop() family of functions respect thread-safety better. This should clear up a few hard-to-debug race conditions.
+ o Fix a bug when using a specialized memory allocator on win32.
+ o Have the win32 select() backend label TCP-socket-connected events as EV_WRITE, not EV_READ. This should bring it in line with the other backends, and improve portability. Patch from Christopher Davis.
+ o Stop using enums as arguments or return values when what we mean is a bitfield of enum values. C++ doesn't believe that you can OR two enum values together and get another enum, and C++ takes its typing seriously. Patch from Christopher Davis.
+ o Add an API to replace all fatal calls to exit() with a user-provided panic function.
+ o Replace all assert() calls with a variant that is aware of the user-provided logging and panic functions.
+ o Add a return value to event_assign so that it can fail rather than asserting when the user gives it bad input. event_set still dies on bad input.
+ o The event_base_new() and event_base_new_with_config() functions now never call exit() on failure. For backward "compatibility", event_init() still does, but more consistently.
+ o Remove compat/sys/_time.h. It interfered with system headers on HPUX, and its functionality has been subsumed by event2/util.h and util-internal.h.
+ o Add a new bufferevent_socket_connect_hostname() to encapsulate the resolve-then-connect operation; see the example after this list.
+ o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian.
+ o Alternative queue-based timeout algorithm for programs that use a large number of timeouts with the same value.
+ o New event_base_config option to disable the timeval cache entirely.
+ o Make EV_PERSIST timeouts more accurate: schedule the next event based on the scheduled time of the previous event, not based on the current time.
+ o Allow http.c to handle cases where getaddrinfo returns an IPv6 address. Patch from Ryan Phillips.
+ o Fix a problem with excessive memory allocation when using multiple event priorities.
+ o Default to using arc4random for DNS transaction IDs on systems that have it; from OpenBSD.
+ o Never check the environment when we're running setuid or setgid; from OpenBSD.
+ o Options passed to evdns_set_option() no longer need to end with a colon.
+ o Add an evutil_getaddrinfo() function to clone getaddrinfo on platforms that don't have it.
+ o Add an evdns_getaddrinfo() function to provide a nonblocking getaddrinfo using evdns, so programs can perform useful hostname lookup.
+ o Finally expose the IOCP-based bufferevent backend. It passes its unit tests, but probably still has some bugs remaining. Code by Nick Mathewson and Christopher Davis.
+ o Numerous other bugfixes.
+ o On FreeBSD and other OSes, connect() can return ECONNREFUSED immediately; instead of failing the function call, report the failure through the callback.
+ o Fix a race condition in the pthreads test case; found by Nick Mathewson
+ o Remove most calls to event_err() in http and deal with memory errors instead
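+
+ A sketch of the resolve-then-connect helper mentioned above; not code from
+ the release, the host name and port 80 are placeholders, and passing a NULL
+ evdns_base makes the lookup blocking:
+
+	#include <sys/socket.h>
+	#include <event2/event.h>
+	#include <event2/dns.h>
+	#include <event2/bufferevent.h>
+
+	static void event_cb(struct bufferevent *bev, short what, void *ctx)
+	{
+		if (what & BEV_EVENT_CONNECTED) {
+			/* name resolution and connect both succeeded */
+		} else if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
+			bufferevent_free(bev);
+		}
+	}
+
+	static void connect_to(struct event_base *base, struct evdns_base *dns,
+	    const char *host)
+	{
+		struct bufferevent *bev =
+		    bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+		if (!bev)
+			return;
+		bufferevent_setcb(bev, NULL, NULL, event_cb, NULL);
+		if (bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
+			host, 80) < 0)
+			bufferevent_free(bev);
+	}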
+
+
+
+Changes in 2.0.2-alpha (25 Jul 2009):
+ o Add a new flag to bufferevents to make all callbacks automatically deferred.
+ o Make evdns functionality locked, and automatically defer dns callbacks.
+ o Fix a possible free(NULL) when freeing an event_base with no signals.
+ o Add a flag to disable checking environment variables when making an event_base
+ o Disallow setting less than 1 priority.
+ o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
+ o Use signal.h, not sys/signal.h. [Patch from mmadia]
+ o Try harder to build with certain older c99 compilers.
+ o Make sure that an event_config's flags field is always initialized to 0. [Bug report from Victor Goya]
+ o Avoid data corruption when reading data entirely into the second-to-last chain of an evbuffer. [Bug report from Victor Goya]
+ o Make sendfile work on FreeBSD
+ o Do not use vararg macros for accessing evrpc structures; this is not backwards compatible, but we did not promise any backwards compatibility for the rpc code.
+ o Actually define the event_config_set_flag() function.
+ o Try harder to compile with Visual C++.
+ o Move event_set() and its allies to event2/event_compat.h where they belong.
+ o Remove the event_gotsig code, which has long been deprecated and unused.
+ o Add an event_get_base() function to return the base assigned to an event.
+ o New function to automate connecting on a socket-based bufferevent.
+ o New functions to automate listening for incoming TCP connections.
+ o Do case-insensitive checks with a locale-independent comparison function.
+ o Rename the evbuffercb and everrorcb callbacks to bufferevent_data_cb and bufferevent_event_cb respectively. The old names are available in bufferevent_compat.h.
+ o Rename the EVBUFFER_* codes used by bufferevent event callbacks to BEV_EVENT_*, to avoid namespace collision with evbuffer flags. The old names are available in bufferevent_compat.h.
+ o Move the EVBUFFER_INPUT and EVBUFFER_OUTPUT macros to bufferevent_compat.h
+ o Add a bufferevent_getfd() function to mirror bufferevent_setfd()
+ o Make bufferevent_setfd() return an error code if the operation is not successful.
+ o Shave 22 bytes off struct event on 32-bit platforms by shrinking and re-ordering fields. The savings on 64-bit platforms is likely higher.
+ o Cap the maximum number of priorities at 256.
+ o Change the semantics of evbuffer_cb_set_flags() to be set-flag only; add a new evbuffer_cb_clear_flags() to remove set flags.
+ o Change the interface of evbuffer_add_reference so that the cleanup callback gets more information
+ o Revise the new evbuffer_reserve_space/evbuffer_commit_space() interfaces so that you can use them without causing extraneous copies or leaving gaps in the evbuffer.
+ o Add a new evbuffer_peek() interface to inspect data in an evbuffer without removing it.
+ o Fix a deadlock when suspending reads in a bufferevent due to a full buffer. (Spotted by Joachim Bauch.)
+ o Fix a memory error when freeing a thread-enabled event base with registered events. (Spotted by Joachim Bauch.)
+ o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
+ o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
+ o Replace some read()/write() instances with send()/recv() to work properly on win32.
+ o Set truncated flag correctly in evdns server replies.
+ o Raise RpcGenError in event_rpcgen.py; from jmanison and Zack Weinberg
+ o Fix preamble of rpcgen-generated files to rely on event2 includes; based on work by jmansion; patch from Zack Weinberg.
+ o Allow specifying the output filename for rpcgen; based on work by jmansion; patch from Zack Weinberg.
+ o Allow C identifiers as struct names; allow multiple comments in .rpc files; from Zack Weinberg
+ o Mitigate a race condition when using socket bufferevents in multiple threads.
+ o Use AC_SEARCH_LIBS, not AC_CHECK_LIB to avoid needless library use.
+ o Do not allow event_del(ev) to return while that event's callback is executing in another thread. This fixes a nasty race condition.
+ o event_get_supported_methods() now lists methods that have been disabled with the EVENT_NO* environment options.
+ o Rename encode_int[64] to evtag_encode_int[64] to avoid polluting the global namespace. The old method names are still available as macros in event2/tag_compat.h.
+
+
+
+Changes in 2.0.1-alpha (17 Apr 2009):
+ o free minheap on event_base_free(); from Christopher Layne
+ o debug cleanups in signal.c; from Christopher Layne
+ o provide event_base_new() that does not set the current_base global
+ o bufferevent_write now uses a const source argument; report from Charles Kerr
+ o improve documentation on event_base_loopexit; patch from Scott Lamb
+ o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
+ o Check return value of event_add in signal.c
+ o provide event_reinit() to reinitialize an event_base after fork
+ o New function event_set_mem_functions. It allows the user to give libevent replacement functions to use for memory management in place of malloc(), free(), etc. This should be generally useful for memory instrumentation, specialized allocators, and so on.
+ o The kqueue implementation now catches signals that are raised after event_add() is called but before the event_loop() call. This makes it match the other implementations.
+ o The kqueue implementation now restores original signal handlers correctly when its signal events are removed.
+ o Add a more powerful evbuffer_readln as a replacement for evbuffer_readline. The new function handles more newline styles, and is more useful with buffers that may contain nul characters; see the example after this list.
+ o Do not mangle socket handles on 64-bit windows.
+ o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
+ o move EV_PERSIST handling out of the event backends
+ o small improvements to evhttp documentation
+ o always generate Date and Content-Length headers for HTTP/1.1 replies
+ o set the correct event base for HTTP close events
+ o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
+ o Rewrite win32.c backend to be O(n lg n) rather than O(n^2).
+ o Removed obsoleted recalc code
+ o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
+ o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
+ o prefix {encode,decode}_tag functions with evtag to avoid collisions
+ o fix a bug with event_rpcgen for integers
+ o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
+ o add -Wstrict-aliasing to warnings and more cleanup
+ o removed linger from http server socket; reported by Ilya Martynov
+ o event_rpcgen now allows creating integer arrays
+ o support string arrays in event_rpcgen
+ o change evrpc hooking to allow pausing of RPCs; this will make it possible for the hook to do some meaningful work; this is not backwards compatible.
+ o allow an http request callback to take ownership of a request structure
+ o allow association of meta data with RPC requests for hook processing
+ o associate more context for hooks to query such as the connection object
+ o remove pending timeouts on event_base_free()
+ o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
+ o devpoll and evport need reinit; tested by W.C.A Wijngaards
+ o event_base_get_method; from Springande Ulv
+ o Send CRLF after each chunk in HTTP output, for compliance with RFC2616. Patch from "propanbutan". Fixes bug 1894184.
+ o Add an int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
+ o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
+ o Allow regression code to build even without Python installed
+ o remove NDEBUG ifdefs from evdns.c
+ o detect integer types properly on platforms without stdint.h
+ o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
+ o simplify evbuffer by removing orig_buffer
+ o do not insert event into list when evsel->add fails
+ o add support for PUT/DELETE requests; from Josh Rotenberg
+ o introduce evhttp_accept_socket() to accept from an already created socket
+ o include Content-Length in reply for HTTP/1.0 requests with keep-alive
+ o increase listen queue for http sockets to 128; if that is not enough the evhttp_accept_socket() api can be used with a prepared socket.
+ o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
+ o test support for PUT/DELETE requests; from Josh Rotenberg
+ o rewrite of the evbuffer code to reduce memory copies
+ o Some older Solaris versions demand that _REENTRANT be defined to get strtok_r(); do so.
+ o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
+ o Provide OpenSSL style support for multiple threads accessing the same event_base
+ o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
+ o switch thread support so that locks get allocated as they are needed.
+ o make event methods static so that they are not exported; from Andrei Nigmatulin
+ o make RPC replies use application/octet-stream as mime type
+ o do not delete uninitialized timeout event in evdns
+ o Correct the documentation on buffer printf functions.
+ o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
+ o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
+ o Make name_from_addr() threadsafe in http.c
+ o Add new thread-safe interfaces to evdns functions.
+ o Make all event_tagging interfaces threadsafe.
+ o Rename internal memory management functions.
+ o New functions (event_assign, event_new, event_free) for use by apps that want to be safely threadsafe, or want to remain ignorant of the contents of struct event.
+ o introduce bufferevent_read_buffer; allows reading without memory copy.
+ o expose bufferevent_setwatermark via header files and fix high watermark on read
+ o fix a bug in bufferevent read watermarks and add a test for them
+ o fix a bug in which bufferevent_write_buffer would not schedule a write event
+ o provide bufferevent_input and bufferevent_output without requiring knowledge of the structure
+ o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
+ o convert evhttp_connection to use bufferevents.
+ o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
+ o Remove the never-exported, never-used evhttp_hostportfile function.
+ o Support input/output filters for bufferevents; somewhat similar to libio's model. This will allow us to implement SSL, compression, etc, transparently to users of bufferevents such as the http layer.
+ o allow connections to be removed from an rpc pool
+ o add new evtimer_assign, signal_assign, evtimer_new, and signal_new functions to manipulate timer and signal events, analogous to the now-recommended event_assign and event_new
+ o switch internal uses of event_set over to use event_assign.
+ o introduce evbuffer_contiguous_space() api that tells a user how much data is available in the first buffer chain
+ o introduce evbuffer_reserve_space() and evbuffer_commit_space() to make processing in filters more efficient.
+ o reduce system calls for getting current time by caching it.
+ o separate signal events from io events; making the code less complex.
+ o support for periodic timeouts
+ o support for virtual HTTP hosts.
+ o turn event_initialized() into a function, and add function equivalents to EVENT_SIGNAL and EVENT_FD so that people don't need to include event_struct.h
+ o Build test directory correctly with CPPFLAGS set.
+ o Provide an API for retrieving the supported event mechanisms.
+ o event_base_new_with_config() and corresponding config APIs.
+ o migrate the evhttp header to event2/ but accessors are still missing.
+ o deprecate timeout_* event functions by moving them to event_compat.h
+ o Move windows gettimeofday replacement into a new evutil_gettimeofday().
+ o Make configure script work on IRIX.
+ o provide a method for canceling ongoing http requests.
+ o Make vsnprintf() return values consistent on win32.
+ o Fix connection keep-alive behavior for HTTP/1.0
+ o Fix use of freed memory in event_reinit; pointed out by Peter Postma
+ o constify struct timeval * where possible
+ o make event_get_supported_methods obey environment variables
+ o support for edge-triggered events on epoll and kqueue backends: patch from Valery Kholodkov
+ o support for selecting event backends by their features, and for querying the features of a backend.
+ o change failing behavior of event_base_new_with_config: if a config is provided and no backend is selected, return NULL instead of aborting.
+ o deliver partial data to request callbacks when chunked callback is set even if there is no chunking on the http level; allows cancelation of requests from within the chunked callback; from Scott Lamb.
+ o allow min_heap_erase to be called on removed members; from liusifan.
+ o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
+ o Do not use SO_REUSEADDR when connecting
+ o Support 64-bit integers in RPC structs
+ o Correct handling of trailing headers in chunked replies; from Scott Lamb.
+ o Support multi-line HTTP headers; based on a patch from Moshe Litvin
+ o Reject negative Content-Length headers; anonymous bug report
+ o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
+ o Various HTTP correctness fixes from Scott Lamb
+ o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
+ o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
+ o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
+ o Fix off-by-one errors in devpoll; from Ian Bell
+ o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
+ o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
+ o Add new utility functions to correctly observe and log winsock errors.
+ o Do not remove Accept-Encoding header
+ o Clear the timer cache on entering the event loop; reported by Victor Chang
+ o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
+ o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
+ o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
+ o Fix a typo in setting the global event base; reported by lance.
+ o Set the 0x20 bit on outgoing alphabetic characters in DNS requests randomly, and insist on a match in replies. This helps resist DNS poisoning attacks.
+ o Make the http connection close detection work properly with bufferevents and fix a potential memory leak associated with it.
+ o Restructure the event backends so that they do not need to keep track of events themselves, as a side effect multiple events can use the same fd or signal.
+ o Add generic implementations for parsing and emitting IPv6 addresses on platforms that do not have inet_ntop and/or inet_pton.
+ o Allow DNS servers that have IPv6 addresses.
+ o Add an evbuffer_write_atmost() function to write a limited number of bytes to an fd.
+ o Refactor internal notify-main-thread logic to prefer eventfd to pipe, then pipe to socketpair, and only use socketpairs as a last resort.
+ o Try harder to pack all evbuffer reads into as few chains as possible, using readv/WSARecv as appropriate.
+ o New evthread_use_windows_threads() and evthread_use_pthreads() functions to set up the evthread callbacks with reasonable defaults.
+ o Change the semantics of timeouts in conjunction with EV_PERSIST; timeouts in that case will now repeat until deleted.
+ o sendfile, mmap and memory reference support for evbuffers.
+ o New evutil_make_listen_socket_reuseable() to abstract SO_REUSEADDR.
+ o New bind-to option to allow DNS clients to bind to an arbitrary port for outgoing requests.
+ o evbuffers can now be "frozen" to prevent operations at one or both ends.
+ o Bufferevents now notice external attempts to add data to an inbuf or remove it from an outbuf, and stop them.
+ o Fix parsing of queries where the encoded queries contained \r, \n or +
+ o Do not allow internal events to starve lower-priority events.
+
diff --git a/libs/libevent/docs/Doxyfile b/libs/libevent/docs/Doxyfile
new file mode 100644
index 0000000000..d9d6603459
--- /dev/null
+++ b/libs/libevent/docs/Doxyfile
@@ -0,0 +1,257 @@
+# Doxyfile 1.5.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = libevent
+
+# Place all output under 'doxygen/'
+
+OUTPUT_DIRECTORY = doxygen/
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH = include/
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = \
+ include/event2/buffer.h \
+ include/event2/buffer_compat.h \
+ include/event2/bufferevent.h \
+ include/event2/bufferevent_compat.h \
+ include/event2/bufferevent_ssl.h \
+ include/event2/dns.h \
+ include/event2/dns_compat.h \
+ include/event2/event.h \
+ include/event2/event_compat.h \
+ include/event2/http.h \
+ include/event2/http_compat.h \
+ include/event2/listener.h \
+ include/event2/rpc.h \
+ include/event2/rpc_compat.h \
+ include/event2/tag.h \
+ include/event2/tag_compat.h \
+ include/event2/thread.h \
+ include/event2/util.h
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = TAILQ_ENTRY RB_ENTRY EVENT_DEFINED_TQENTRY_ EVENT_IN_DOXYGEN_
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
diff --git a/libs/libevent/docs/LICENSE b/libs/libevent/docs/LICENSE
new file mode 100644
index 0000000000..402ca50896
--- /dev/null
+++ b/libs/libevent/docs/LICENSE
@@ -0,0 +1,99 @@
+Libevent is available for use under the following license, commonly known
+as the 3-clause (or "modified") BSD license:
+
+==============================
+Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+==============================
+
+Portions of Libevent are based on works by others, also made available by
+them under the three-clause BSD license above. The copyright notices are
+available in the corresponding source files; the license is as above. Here's
+a list:
+
+log.c:
+ Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+ Copyright (c) 1993 The Regents of the University of California.
+
+strlcpy.c:
+ Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+
+win32select.c:
+ Copyright (c) 2003 Michael A. Davis <mike@datanerds.net>
+
+evport.c:
+ Copyright (c) 2007 Sun Microsystems
+
+ht-internal.h:
+ Copyright (c) 2002 Christopher Clark
+
+minheap-internal.h:
+ Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+
+==============================
+
+The arc4module is available under the following, sometimes called the
+"OpenBSD" license:
+
+ Copyright (c) 1996, David Mazieres <dm@uun.org>
+ Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+==============================
+
+The Windows timer code is based on code from libutp, which is
+distributed under this license, sometimes called the "MIT" license.
+
+
+Copyright (c) 2010 BitTorrent, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/libs/libevent/docs/Makefile.am b/libs/libevent/docs/Makefile.am
new file mode 100644
index 0000000000..3053cc016a
--- /dev/null
+++ b/libs/libevent/docs/Makefile.am
@@ -0,0 +1,303 @@
+# Makefile.am for libevent
+# Copyright 2000-2007 Niels Provos
+# Copyright 2007-2012 Niels Provos and Nick Mathewson
+#
+# See LICENSE for copying information.
+
+# 'foreign' means that we're not enforcing GNU package rules strictly.
+# '1.9' means that we need automake 1.9 or later (and we do).
+AUTOMAKE_OPTIONS = foreign 1.9 subdir-objects
+
+ACLOCAL_AMFLAGS = -I m4
+
+# This is the "Release" of the Libevent ABI. It takes precedence over
+# the VERSION_INFO, so that two versions of Libevent with the same
+# "Release" are never binary-compatible.
+#
+# This number was incremented once for the 2.0 release candidate, and
+# will increment for each series until we revise our interfaces enough
+# that we can seriously expect ABI compatibility between series.
+#
+RELEASE = -release 2.1
+
+# This is the version info for the libevent binary API. It has three
+# numbers:
+# Current -- the number of the binary API that we're implementing
+# Revision -- which iteration of the implementation of the binary
+# API are we supplying?
+# Age -- How many previous binary API versions do we also
+# support?
+#
+# To increment a VERSION_INFO (current:revision:age):
+# If the ABI didn't change:
+# Return (current:revision+1:age)
+# If the ABI changed, but it's backward-compatible:
+# Return (current+1:0:age+1)
+# If the ABI changed and it isn't backward-compatible:
+# Return (current+1:0:0)
+#
+# Once an RC is out, DO NOT MAKE ANY ABI-BREAKING CHANGES IN THAT SERIES
+# UNLESS YOU REALLY REALLY HAVE TO.
+VERSION_INFO = 5:0:0
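+#
+# As an illustrative sketch only (not a release from the history below):
+# starting from 5:0:0, a release with no ABI change would move to 5:1:0,
+# a backward-compatible ABI change would move to 6:0:1, and an
+# incompatible ABI change would move to 6:0:0.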
+
+# History: RELEASE VERSION_INFO
+# 2.0.1-alpha -- 2.0 1:0:0
+# 2.0.2-alpha -- 2:0:0
+# 2.0.3-alpha -- 2:0:0 (should have incremented; didn't.)
+# 2.0.4-alpha -- 3:0:0
+# 2.0.5-beta -- 4:0:0
+# 2.0.6-rc -- 2.0 2:0:0
+# 2.0.7-rc -- 2.0 3:0:1
+# 2.0.8-rc -- 2.0 4:0:2
+# 2.0.9-rc -- 2.0 5:0:0 (ABI changed slightly)
+# 2.0.10-stable-- 2.0 5:1:0 (No ABI change)
+# 2.0.11-stable-- 2.0 6:0:1 (ABI changed, backward-compatible)
+# 2.0.12-stable-- 2.0 6:1:1 (No ABI change)
+# 2.0.13-stable-- 2.0 6:2:1 (No ABI change)
+# 2.0.14-stable-- 2.0 6:3:1 (No ABI change)
+# 2.0.15-stable-- 2.0 6:3:1 (Forgot to update :( )
+# 2.0.16-stable-- 2.0 6:4:1 (No ABI change)
+# 2.0.17-stable-- 2.0 6:5:1 (No ABI change)
+# 2.0.18-stable-- 2.0 6:6:1 (No ABI change)
+# 2.0.19-stable-- 2.0 6:7:1 (No ABI change)
+# 2.0.20-stable-- 2.0 6:8:1 (No ABI change)
+# 2.0.21-stable-- 2.0 6:9:1 (No ABI change)
+#
+# For Libevent 2.1:
+# 2.1.1-alpha -- 2.1 1:0:0
+# 2.1.2-alpha -- 2.1 1:0:0 (should have been 2:0:1)
+# 2.1.3-alpha -- 2.1 3:0:0 (ABI changed slightly)
+# 2.1.4-alpha -- 2.1 4:0:0 (ABI changed slightly)
+# 2.1.5-beta -- 2.1 5:0:0 (ABI changed slightly)
+
+# ABI version history for this package effectively restarts every time
+# we change RELEASE. Version 1.4.x had RELEASE of 1.4.
+#
+# Ideally, we would not be using RELEASE at all; instead we could just
+# use the VERSION_INFO field to label our backward-incompatible ABI
+# changes, and those would be few and far between. Unfortunately,
+# Libevent still exposes far too many volatile structures in its
+# headers, so we pretty much have to assume that most development
+# series will break ABI compatibility. For now, it's simplest just to
+# keep incrementing the RELEASE between series and resetting VERSION_INFO.
+#
+# Eventually, when we get to the point where the structures in the
+# headers are all non-changing (or not there at all!), we can shift to
+# a more normal worldview where backward-incompatible ABI changes are
+# nice and rare. For the next couple of years, though, 'struct event'
+# is user-visible, and so we can pretty much guarantee that release
+# series won't be binary-compatible.
+
+if INSTALL_LIBEVENT
+dist_bin_SCRIPTS = event_rpcgen.py
+endif
+
+pkgconfigdir=$(libdir)/pkgconfig
+LIBEVENT_PKGCONFIG=libevent.pc
+
+# These sources are conditionally added by configure.ac or conditionally
+# included from other files.
+PLATFORM_DEPENDENT_SRC = \
+ arc4random.c \
+ epoll_sub.c
+
+EXTRA_DIST = \
+ ChangeLog-1.4 \
+ ChangeLog-2.0 \
+ Doxyfile \
+ LICENSE \
+ Makefile.nmake test/Makefile.nmake \
+ autogen.sh \
+ event_rpcgen.py \
+ libevent.pc.in \
+ make-event-config.sed \
+ whatsnew-2.0.txt \
+ whatsnew-2.1.txt \
+ $(PLATFORM_DEPENDENT_SRC)
+
+LIBEVENT_LIBS_LA = libevent.la libevent_core.la libevent_extra.la
+if PTHREADS
+LIBEVENT_LIBS_LA += libevent_pthreads.la
+LIBEVENT_PKGCONFIG += libevent_pthreads.pc
+endif
+if OPENSSL
+LIBEVENT_LIBS_LA += libevent_openssl.la
+LIBEVENT_PKGCONFIG += libevent_openssl.pc
+endif
+
+if INSTALL_LIBEVENT
+lib_LTLIBRARIES = $(LIBEVENT_LIBS_LA)
+pkgconfig_DATA = $(LIBEVENT_PKGCONFIG)
+else
+noinst_LTLIBRARIES = $(LIBEVENT_LIBS_LA)
+endif
+
+EXTRA_SOURCE=
+noinst_HEADERS=
+noinst_PROGRAMS=
+EXTRA_PROGRAMS=
+CLEANFILES=
+DISTCLEANFILES=
+BUILT_SOURCES =
+include include/include.am
+include sample/include.am
+include test/include.am
+
+if BUILD_WIN32
+
+SYS_LIBS = -lws2_32 -lshell32 -ladvapi32
+SYS_SRC = win32select.c evthread_win32.c buffer_iocp.c event_iocp.c \
+ bufferevent_async.c
+SYS_INCLUDES = -IWIN32-Code -IWIN32-Code/nmake
+
+else
+
+SYS_LIBS =
+SYS_SRC =
+SYS_INCLUDES =
+
+endif
+
+if STRLCPY_IMPL
+SYS_SRC += strlcpy.c
+endif
+if SELECT_BACKEND
+SYS_SRC += select.c
+endif
+if POLL_BACKEND
+SYS_SRC += poll.c
+endif
+if DEVPOLL_BACKEND
+SYS_SRC += devpoll.c
+endif
+if KQUEUE_BACKEND
+SYS_SRC += kqueue.c
+endif
+if EPOLL_BACKEND
+SYS_SRC += epoll.c
+endif
+if EVPORT_BACKEND
+SYS_SRC += evport.c
+endif
+if SIGNAL_SUPPORT
+SYS_SRC += signal.c
+endif
+
+BUILT_SOURCES += include/event2/event-config.h
+
+include/event2/event-config.h: config.h make-event-config.sed
+ $(AM_V_GEN)test -d include/event2 || $(MKDIR_P) include/event2
+ $(AM_V_at)$(SED) -f $(srcdir)/make-event-config.sed < config.h > $@T
+ $(AM_V_at)mv -f $@T $@
+
+CORE_SRC = \
+ buffer.c \
+ bufferevent.c \
+ bufferevent_filter.c \
+ bufferevent_pair.c \
+ bufferevent_ratelim.c \
+ bufferevent_sock.c \
+ event.c \
+ evmap.c \
+ evthread.c \
+ evutil.c \
+ evutil_rand.c \
+ evutil_time.c \
+ listener.c \
+ log.c \
+ $(SYS_SRC)
+
+EXTRAS_SRC = \
+ evdns.c \
+ event_tagging.c \
+ evrpc.c \
+ http.c
+
+if BUILD_WITH_NO_UNDEFINED
+NO_UNDEFINED = -no-undefined
+MAYBE_CORE = libevent_core.la
+else
+NO_UNDEFINED =
+MAYBE_CORE =
+endif
+
+GENERIC_LDFLAGS = -version-info $(VERSION_INFO) $(RELEASE) $(NO_UNDEFINED)
+
+libevent_la_SOURCES = $(CORE_SRC) $(EXTRAS_SRC)
+libevent_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_la_LDFLAGS = $(GENERIC_LDFLAGS)
+
+libevent_core_la_SOURCES = $(CORE_SRC)
+libevent_core_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_core_la_LDFLAGS = $(GENERIC_LDFLAGS)
+
+if PTHREADS
+libevent_pthreads_la_SOURCES = evthread_pthread.c
+libevent_pthreads_la_LIBADD = $(MAYBE_CORE)
+libevent_pthreads_la_LDFLAGS = $(GENERIC_LDFLAGS)
+endif
+
+libevent_extra_la_SOURCES = $(EXTRAS_SRC)
+libevent_extra_la_LIBADD = $(MAYBE_CORE) $(SYS_LIBS)
+libevent_extra_la_LDFLAGS = $(GENERIC_LDFLAGS)
+
+if OPENSSL
+libevent_openssl_la_SOURCES = bufferevent_openssl.c
+libevent_openssl_la_LIBADD = $(MAYBE_CORE) $(OPENSSL_LIBS)
+libevent_openssl_la_LDFLAGS = $(GENERIC_LDFLAGS)
+libevent_openssl_la_CPPFLAGS = $(AM_CPPFLAGS) $(OPENSSL_INCS)
+endif
+
+noinst_HEADERS += \
+ WIN32-Code/nmake/evconfig-private.h \
+ WIN32-Code/nmake/event2/event-config.h \
+ WIN32-Code/tree.h \
+ bufferevent-internal.h \
+ changelist-internal.h \
+ compat/sys/queue.h \
+ defer-internal.h \
+ epolltable-internal.h \
+ evbuffer-internal.h \
+ evconfig-private.h \
+ event-internal.h \
+ evmap-internal.h \
+ evrpc-internal.h \
+ evsignal-internal.h \
+ evthread-internal.h \
+ ht-internal.h \
+ http-internal.h \
+ iocp-internal.h \
+ ipv6-internal.h \
+ kqueue-internal.h \
+ log-internal.h \
+ minheap-internal.h \
+ mm-internal.h \
+ ratelim-internal.h \
+ strlcpy-internal.h \
+ time-internal.h \
+ util-internal.h
+
+EVENT1_HDRS = \
+ include/evdns.h \
+ include/event.h \
+ include/evhttp.h \
+ include/evrpc.h \
+ include/evutil.h
+
+if INSTALL_LIBEVENT
+include_HEADERS = $(EVENT1_HDRS)
+else
+noinst_HEADERS += $(EVENT1_HDRS)
+endif
+
+AM_CPPFLAGS = -I$(srcdir)/compat -I$(srcdir)/include -I./include $(SYS_INCLUDES)
+
+verify: check
+
+doxygen: FORCE
+ doxygen $(srcdir)/Doxyfile
+FORCE:
+
+DISTCLEANFILES += *~ libevent.pc ./include/event2/event-config.h
+
diff --git a/libs/libevent/docs/Makefile.nmake b/libs/libevent/docs/Makefile.nmake
new file mode 100644
index 0000000000..f27cd61949
--- /dev/null
+++ b/libs/libevent/docs/Makefile.nmake
@@ -0,0 +1,82 @@
+# WATCH OUT! This makefile is a work in progress. -*- makefile -*-
+#
+# I'm not very knowledgeable about MSVC and nmake beyond their most basic
+# aspects. If anything here looks wrong to you, please let me know.
+
+# If OPENSSL_DIR is not set, builds without OpenSSL support. If you want
+# OpenSSL support, you can set the OPENSSL_DIR variable to where you
+# installed OpenSSL. This can be done in the environment:
+# set OPENSSL_DIR=c:\openssl
+# Or on the nmake command line:
+# nmake OPENSSL_DIR=C:\openssl -f Makefile.nmake
+# Or by uncommenting the following line here in the makefile...
+
+# OPENSSL_DIR=c:\openssl
+
+!IFDEF OPENSSL_DIR
+SSL_CFLAGS=/I$(OPENSSL_DIR)\include /DEVENT__HAVE_OPENSSL
+!ELSE
+SSL_CFLAGS=
+!ENDIF
+
+# Needed for correctness
+CFLAGS=/IWIN32-Code /IWIN32-Code/nmake /Iinclude /Icompat /DHAVE_CONFIG_H /I. $(SSL_CFLAGS)
+
+# For optimization and warnings
+CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
+
+# XXXX have a debug mode
+
+LIBFLAGS=/nologo
+
+CORE_OBJS=event.obj buffer.obj bufferevent.obj bufferevent_sock.obj \
+ bufferevent_pair.obj listener.obj evmap.obj log.obj evutil.obj \
+ strlcpy.obj signal.obj bufferevent_filter.obj evthread.obj \
+ bufferevent_ratelim.obj evutil_rand.obj evutil_time.obj
+WIN_OBJS=win32select.obj evthread_win32.obj buffer_iocp.obj \
+ event_iocp.obj bufferevent_async.obj
+EXTRA_OBJS=event_tagging.obj http.obj evdns.obj evrpc.obj
+
+!IFDEF OPENSSL_DIR
+SSL_OBJS=bufferevent_openssl.obj
+SSL_LIBS=libevent_openssl.lib
+!ELSE
+SSL_OBJS=
+SSL_LIBS=
+!ENDIF
+
+ALL_OBJS=$(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS) $(SSL_OBJS)
+STATIC_LIBS=libevent_core.lib libevent_extras.lib libevent.lib $(SSL_LIBS)
+
+
+all: static_libs tests
+
+static_libs: $(STATIC_LIBS)
+
+libevent_core.lib: $(CORE_OBJS) $(WIN_OBJS)
+ lib $(LIBFLAGS) $(CORE_OBJS) $(WIN_OBJS) /out:libevent_core.lib
+
+libevent_extras.lib: $(EXTRA_OBJS)
+ lib $(LIBFLAGS) $(EXTRA_OBJS) /out:libevent_extras.lib
+
+libevent.lib: $(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
+ lib $(LIBFLAGS) $(CORE_OBJS) $(EXTRA_OBJS) $(WIN_OBJS) /out:libevent.lib
+
+libevent_openssl.lib: $(SSL_OBJS)
+ lib $(LIBFLAGS) $(SSL_OBJS) /out:libevent_openssl.lib
+
+clean:
+ del $(ALL_OBJS)
+ del $(STATIC_LIBS)
+ cd test
+ $(MAKE) /F Makefile.nmake clean
+ cd ..
+
+tests:
+ cd test
+!IFDEF OPENSSL_DIR
+ $(MAKE) OPENSSL_DIR=$(OPENSSL_DIR) /F Makefile.nmake
+!ELSE
+ $(MAKE) /F Makefile.nmake
+!ENDIF
+ cd ..
diff --git a/libs/libevent/docs/README.md b/libs/libevent/docs/README.md
new file mode 100644
index 0000000000..fdd777ee8d
--- /dev/null
+++ b/libs/libevent/docs/README.md
@@ -0,0 +1,397 @@
+<p align="center">
+ <img src="https://strcpy.net/libevent3.png" alt="libevent logo"/>
+</p>
+
+
+
+[![Appveyor Win32 Build Status](https://ci.appveyor.com/api/projects/status/github/libevent/libevent?branch=master&svg=true)](https://ci.appveyor.com/project/nmathewson/libevent)
+[![Travis Build Status](https://travis-ci.org/libevent/libevent.svg?branch=master)](https://travis-ci.org/libevent/libevent)
+
+
+
+# 0. BUILDING AND INSTALLATION (Briefly)
+
+## Autoconf
+
+ $ ./configure
+ $ make
+ $ make verify # (optional)
+ $ sudo make install
+
+## CMake (General)
+
+
+The Libevent-specific CMake variables are as follows (the values shown are
+the defaults).
+
+```
+# Installation directory for executables
+EVENT_INSTALL_BIN_DIR:PATH=bin
+
+# Installation directory for CMake files
+EVENT_INSTALL_CMAKE_DIR:PATH=lib/cmake/libevent
+
+## Installation directory for header files
+EVENT_INSTALL_INCLUDE_DIR:PATH=include
+
+## Installation directory for libraries
+EVENT_INSTALL_LIB_DIR:PATH=lib
+
+## Define if libevent should be built with shared libraries instead of archives
+EVENT__BUILD_SHARED_LIBRARIES:BOOL=OFF
+
+# Enable running gcov to get a test coverage report (only works with
+# GCC/CLang). Make sure to enable -DCMAKE_BUILD_TYPE=Debug as well.
+EVENT__COVERAGE:BOOL=OFF
+
+# Defines if libevent should build without the benchmark executables
+EVENT__DISABLE_BENCHMARK:BOOL=OFF
+
+# Define if libevent should build without support for a debug mode
+EVENT__DISABLE_DEBUG_MODE:BOOL=OFF
+
+# Define if libevent should not allow replacing the mm functions
+EVENT__DISABLE_MM_REPLACEMENT:BOOL=OFF
+
+# Define if libevent should build without support for OpenSSL encryption
+EVENT__DISABLE_OPENSSL:BOOL=ON
+
+# Disable the regress tests
+EVENT__DISABLE_REGRESS:BOOL=OFF
+
+# Disable sample files
+EVENT__DISABLE_SAMPLES:BOOL=OFF
+
+# If tests should be compiled or not
+EVENT__DISABLE_TESTS:BOOL=OFF
+
+# Define if libevent should not be compiled with thread support
+EVENT__DISABLE_THREAD_SUPPORT:BOOL=OFF
+
+# Enables verbose debugging
+EVENT__ENABLE_VERBOSE_DEBUG:BOOL=OFF
+
+# When cross-compiling, forces running a test program that verifies that Kqueue
+# works with pipes. Note that this requires you to manually run the test program
+# on the cross-compilation target to verify that it works. See the cmake
+# documentation for try_run for more details
+EVENT__FORCE_KQUEUE_CHECK:BOOL=OFF
+
+# set EVENT_STAGE_VERSION
+EVENT__STAGE_VERSION:STRING=beta
+```
+
+__More variables can be found by running `cmake -LAH <sourcedir_path>`__
+
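+For example (a sketch only; the variable names are taken from the list above,
+but check the `cmake -LAH` output for your checkout before relying on them),
+options are passed on the command line with `-D`:
+
+```
+$ mkdir build && cd build
+$ cmake -DEVENT__BUILD_SHARED_LIBRARIES=ON -DEVENT__DISABLE_TESTS=ON ..
+$ cmake --build .
+```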
+
+## CMake (Windows)
+
+Install CMake: <http://www.cmake.org>
+
+
+ $ md build && cd build
+ $ cmake -G "Visual Studio 10" ..  # Or whatever generator you want; run cmake --help for a list.
+ $ start libevent.sln
+
+## CMake (Unix)
+
+ $ mkdir build && cd build
+ $ cmake .. # Default to Unix Makefiles.
+ $ make
+ $ make verify # (optional)
+
+
+# 1. BUILDING AND INSTALLATION (In Depth)
+
+## Autoconf
+
+To build libevent, type
+
+ $ ./configure && make
+
+
+ (If you got libevent from the git repository, you will
+ first need to run the included "autogen.sh" script in order to
+ generate the configure script.)
+
+You can run the regression tests by running
+
+ $ make verify
+
+Install as root via
+
+ $ make install
+
+Before reporting any problems, please run the regression tests.
+
+To enable low-level tracing, build the library as:
+
+ $ CFLAGS=-DUSE_DEBUG ./configure [...]
+
+Standard configure flags should work. In particular, see:
+
+ --disable-shared Only build static libraries
+ --prefix Install all files relative to this directory.
+
+
+The configure script also supports the following flags:
+
+ --enable-gcc-warnings Enable extra compiler checking with GCC.
+ --disable-malloc-replacement
+ Don't let applications replace our memory
+ management functions
+ --disable-openssl Disable support for OpenSSL encryption.
+ --disable-thread-support Don't support multithreaded environments.
+
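+For example (illustrative only; the install prefix /opt/libevent is just a
+placeholder), a static-only build without OpenSSL could be configured as:
+
+    $ ./configure --prefix=/opt/libevent --disable-shared --disable-openssl
+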
+## CMake (Windows)
+
+(Note that autoconf is currently the most mature and supported build
+environment for libevent; the cmake instructions here are new and
+experimental, though they _should_ be solid. We hope that cmake will
+still be supported in future versions of Libevent, and will try to
+make sure that happens.)
+
+First of all, install CMake from <http://www.cmake.org>.
+
+To build libevent using Microsoft Visual Studio, open the "Visual Studio Command Prompt" and type:
+
+```
+$ cd <libevent source dir>
+$ mkdir build && cd build
+$ cmake -G "Visual Studio 10" ..  # Or whatever generator you want; run cmake --help for a list.
+$ start libevent.sln
+```
+
+In the above, the ".." refers to the directory containing the Libevent source code.
+You can build multiple versions (with different compile-time settings) from the
+same source tree by creating other build directories.
+
+For this reason, it is highly recommended to build "out of source" when using CMake,
+rather than "in source", which is the normal behaviour of autoconf.
+
+The "NMake Makefiles" CMake generator can be used to build entirely via the command line.
+
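+For instance (a sketch; run these from a Visual Studio command prompt so that
+nmake and the compiler are on the PATH):
+
+```
+$ cmake -G "NMake Makefiles" ..
+$ nmake
+```
+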
+To get a list of settings available for the project you can type:
+
+```
+$ cmake -LH ..
+```
+
+### GUI
+
+CMake also provides a GUI that lets you specify the source directory and output (binary) directory
+that the build should be placed in.
+
+### OpenSSL support
+
+To build Libevent with OpenSSL support you will need to have OpenSSL binaries available when building;
+these can be found here: <http://www.openssl.org/related/binaries.html>
+
+# 2. USEFUL LINKS:
+
+For the latest released version of Libevent, see the official website at
+<http://libevent.org/> .
+
+There's a pretty good work-in-progress manual up at
+ <http://www.wangafu.net/~nickm/libevent-book/> .
+
+For the latest development versions of Libevent, access our Git repository
+via
+
+```
+$ git clone https://github.com/libevent/libevent.git
+```
+
+You can browse the git repository online at:
+
+<https://github.com/libevent/Libevent>
+
+To report bugs, issues, or ask for new features:
+
+__Patches__: https://github.com/libevent/libevent/pulls
+> OK, those are not really _patches_. You fork, modify, and hit the "Create Pull Request" button.
+> You can still submit normal git patches via the mailing list.
+
+__Bugs, Features [RFC], and Issues__: https://github.com/libevent/libevent/issues
+> Or you can do it via the mailing list.
+
+There's also a libevent-users mailing list for talking about Libevent
+use and development:
+
+<http://archives.seul.org/libevent/users/>
+
+# 3. ACKNOWLEDGMENTS
+
+The following people have helped with suggestions, ideas, code or
+fixing bugs:
+
+ * Samy Al Bahra
+ * Antony Antony
+ * Jacob Appelbaum
+ * Arno Bakker
+ * Weston Andros Adamson
+ * William Ahern
+ * Ivan Andropov
+ * Sergey Avseyev
+ * Avi Bab
+ * Joachim Bauch
+ * Andrey Belobrov
+ * Gilad Benjamini
+ * Stas Bekman
+ * Denis Bilenko
+ * Julien Blache
+ * Kevin Bowling
+ * Tomash Brechko
+ * Kelly Brock
+ * Ralph Castain
+ * Adrian Chadd
+ * Lawnstein Chan
+ * Shuo Chen
+ * Ka-Hing Cheung
+ * Andrew Cox
+ * Paul Croome
+ * George Danchev
+ * Andrew Danforth
+ * Ed Day
+ * Christopher Davis
+ * Mike Davis
+ * Frank Denis
+ * Antony Dovgal
+ * Mihai Draghicioiu
+ * Alexander Drozdov
+ * Mark Ellzey
+ * Shie Erlich
+ * Leonid Evdokimov
+ * Juan Pablo Fernandez
+ * Christophe Fillot
+ * Mike Frysinger
+ * Remi Gacogne
+ * Artem Germanov
+ * Alexander von Gernler
+ * Diego Giagio
+ * Artur Grabowski
+ * Diwaker Gupta
+ * Kuldeep Gupta
+ * Sebastian Hahn
+ * Dave Hart
+ * Greg Hazel
+ * Nicholas Heath
+ * Michael Herf
+ * Savg He
+ * Mark Heily
+ * Maxime Henrion
+ * Greg Hewgill
+ * Andrew Hochhaus
+ * Aaron Hopkins
+ * Tani Hosokawa
+ * Jamie Iles
+ * Xiuqiang Jiang
+ * Claudio Jeker
+ * Evan Jones
+ * Marcin Juszkiewicz
+ * George Kadianakis
+ * Makoto Kato
+ * Phua Keat
+ * Azat Khuzhin
+ * Alexander Klauer
+ * Kevin Ko
+ * Brian Koehmstedt
+ * Marko Kreen
+ * Ondřej Kuzník
+ * Valery Kyholodov
+ * Ross Lagerwall
+ * Scott Lamb
+ * Christopher Layne
+ * Adam Langley
+ * Graham Leggett
+ * Volker Lendecke
+ * Philip Lewis
+ * Zhou Li
+ * David Libenzi
+ * Yan Lin
+ * Moshe Litvin
+ * Simon Liu
+ * Mitchell Livingston
+ * Hagne Mahre
+ * Lubomir Marinov
+ * Abilio Marques
+ * Nicolas Martyanoff
+ * Abel Mathew
+ * Nick Mathewson
+ * James Mansion
+ * Nicholas Marriott
+ * Andrey Matveev
+ * Caitlin Mercer
+ * Dagobert Michelsen
+ * Andrea Montefusco
+ * Mansour Moufid
+ * Mina Naguib
+ * Felix Nawothnig
+ * Trond Norbye
+ * Linus Nordberg
+ * Richard Nyberg
+ * Jon Oberheide
+ * John Ohl
+ * Phil Oleson
+ * Alexey Ozeritsky
+ * Dave Pacheco
+ * Derrick Pallas
+ * Tassilo von Parseval
+ * Catalin Patulea
+ * Patrick Pelletier
+ * Simon Perreault
+ * Dan Petro
+ * Pierre Phaneuf
+ * Amarin Phaosawasdi
+ * Ryan Phillips
+ * Dimitre Piskyulev
+ * Pavel Plesov
+ * Jon Poland
+ * Roman Puls
+ * Nate R
+ * Robert Ransom
+ * Balint Reczey
+ * Bert JW Regeer
+ * Nate Rosenblum
+ * Peter Rosin
+ * Maseeb Abdul Qadir
+ * Wang Qin
+ * Alex S
+ * Gyepi Sam
+ * Hanna Schroeter
+ * Ralf Schmitt
+ * Mike Smellie
+ * Steve Snyder
+ * Nir Soffer
+ * Dug Song
+ * Dongsheng Song
+ * Hannes Sowa
+ * Joakim Soderberg
+ * Joseph Spadavecchia
+ * Kevin Springborn
+ * Harlan Stenn
+ * Andrew Sweeney
+ * Ferenc Szalai
+ * Brodie Thiesfield
+ * Jason Toffaletti
+ * Brian Utterback
+ * Gisle Vanem
+ * Bas Verhoeven
+ * Constantine Verutin
+ * Colin Watt
+ * Zack Weinberg
+ * Jardel Weyrich
+ * Jay R. Wren
+ * Mobai Zhang
+ * Alejo
+ * Alex
+ * Taral
+ * propanbutan
+ * masksqwe
+ * mmadia
+ * yangacer
+
+If we have forgotten your name, please contact us.
diff --git a/libs/libevent/docs/appveyor.yml b/libs/libevent/docs/appveyor.yml
new file mode 100644
index 0000000000..f9af62d2db
--- /dev/null
+++ b/libs/libevent/docs/appveyor.yml
@@ -0,0 +1,45 @@
+version: 2.1.5.{build}
+shallow_clone: true
+
+os: Visual Studio 2015 RC
+
+build:
+ verbosity: detailed
+
+environment:
+ global:
+ CYG_ROOT: C:/MinGW/msys/1.0
+
+init:
+ - 'echo Building libevent %version% for Windows'
+ - 'echo System architecture: %PLATFORM%'
+ - 'echo Repo build branch is: %APPVEYOR_REPO_BRANCH%'
+ - 'echo Build folder is: %APPVEYOR_BUILD_FOLDER%'
+
+install:
+ - set PATH=%PATH%;C:\MinGW\msys\1.0\bin;C:\MinGW\bin
+ - appveyor DownloadFile https://strcpy.net/packages/Win32OpenSSL-1_0_2a.exe
+ - Win32OpenSSL-1_0_2a.exe /silent /verysilent /sp- /suppressmsgboxes
+
+build_script:
+ - cmd: 'echo Cygwin root is: %CYG_ROOT%'
+ - cmd: 'echo Build folder is: %APPVEYOR_BUILD_FOLDER%'
+ - cmd: 'echo Repo build branch is: %APPVEYOR_REPO_BRANCH%'
+ - cmd: 'echo Repo build commit is: %APPVEYOR_REPO_COMMIT%'
+ - cmd: "echo installing stuff"
+ - cmd: 'echo "C:\MinGW /mingw" >%CYG_ROOT%/etc/fstab'
+ - cmd: 'C:\MinGW\bin\mingw-get install autotools autoconf automake python'
+ - cmd: 'echo Autogen running...'
+ - cmd: '%CYG_ROOT%/bin/bash -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null;mount C:/MinGW /mingw; bash -x ./autogen.sh; ./configure; make; make verify"'
+
+#install:
+# - appveyor DownloadFile https://strcpy.net/packages/Win32OpenSSL-1_0_2a.exe
+# - Win32OpenSSL-1_0_2a.exe /silent /verysilent /sp- /suppressmsgboxes
+#build_script:
+# - md build
+# - cd build
+# - cmake ..
+# - cmake --build .
+# - ctest --output-on-failure
+cache:
+ - C:\OpenSSL-Win32
diff --git a/libs/libevent/docs/autogen.sh b/libs/libevent/docs/autogen.sh
new file mode 100644
index 0000000000..57eeb940b9
--- /dev/null
+++ b/libs/libevent/docs/autogen.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
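+# Regenerate the autoconf/automake build files. Prefer autoreconf when it is
+# available; otherwise fall back to running the individual tools by hand,
+# using glibtoolize instead of libtoolize on Darwin.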
+if [ -x "`which autoreconf 2>/dev/null`" ] ; then
+ exec autoreconf -ivf
+fi
+
+LIBTOOLIZE=libtoolize
+SYSNAME=`uname`
+if [ "x$SYSNAME" = "xDarwin" ] ; then
+ LIBTOOLIZE=glibtoolize
+fi
+aclocal -I m4 && \
+ autoheader && \
+ $LIBTOOLIZE && \
+ autoconf && \
+ automake --add-missing --force-missing --copy
diff --git a/libs/libevent/docs/cmake/AddCompilerFlags.cmake b/libs/libevent/docs/cmake/AddCompilerFlags.cmake
new file mode 100644
index 0000000000..c7da188b7a
--- /dev/null
+++ b/libs/libevent/docs/cmake/AddCompilerFlags.cmake
@@ -0,0 +1,15 @@
+include(CheckCCompilerFlag)
+
+macro(add_compiler_flags _flags)
+ foreach(flag ${_flags})
+ string(REGEX REPLACE "[-.+/:= ]" "_" _flag_esc "${flag}")
+
+ check_c_compiler_flag("${flag}" check_c_compiler_flag_${_flag_esc})
+
+ if (check_c_compiler_flag_${_flag_esc})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
+ endif()
+ endforeach()
+endmacro()
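+
+# Illustrative usage (a sketch): add_compiler_flags(-Wall) appends -Wall to
+# CMAKE_C_FLAGS, but only if check_c_compiler_flag() reports that the compiler
+# accepts it; rejected flags are silently skipped.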
+
+
diff --git a/libs/libevent/docs/cmake/COPYING-CMAKE-SCRIPTS b/libs/libevent/docs/cmake/COPYING-CMAKE-SCRIPTS
new file mode 100644
index 0000000000..ab3c4d25d1
--- /dev/null
+++ b/libs/libevent/docs/cmake/COPYING-CMAKE-SCRIPTS
@@ -0,0 +1,22 @@
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/libs/libevent/docs/cmake/CheckFileOffsetBits.c b/libs/libevent/docs/cmake/CheckFileOffsetBits.c
new file mode 100644
index 0000000000..d948fecf2b
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckFileOffsetBits.c
@@ -0,0 +1,14 @@
+#include <sys/types.h>
+
+#define KB ((off_t)1024)
+#define MB ((off_t)1024 * KB)
+#define GB ((off_t)1024 * MB)
+#define TB ((off_t)1024 * GB)
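+
+/* Note: t2 gets a negative array size -- and the compile fails -- unless off_t
+ * is wide enough (at least 64 bits) for the arithmetic below to hold. */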
+int t2[(((64 * GB -1) % 671088649) == 268434537)
+ && (((TB - (64 * GB -1) + 255) % 1792151290) == 305159546)? 1: -1];
+
+int main()
+{
+ ;
+ return 0;
+}
diff --git a/libs/libevent/docs/cmake/CheckFileOffsetBits.cmake b/libs/libevent/docs/cmake/CheckFileOffsetBits.cmake
new file mode 100644
index 0000000000..1253440160
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckFileOffsetBits.cmake
@@ -0,0 +1,43 @@
+# - Check if _FILE_OFFSET_BITS macro needed for large files
+# CHECK_FILE_OFFSET_BITS ()
+#
+# The following variables may be set before calling this macro to
+# modify the way the check is run:
+#
+# CMAKE_REQUIRED_FLAGS = string of compile command line flags
+# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar)
+# CMAKE_REQUIRED_INCLUDES = list of include directories
+# Copyright (c) 2009, Michihiro NAKAJIMA
+#
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+
+#INCLUDE(CheckCSourceCompiles)
+
+GET_FILENAME_COMPONENT(_selfdir_CheckFileOffsetBits
+ "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+MACRO (CHECK_FILE_OFFSET_BITS)
+ IF(NOT DEFINED _FILE_OFFSET_BITS)
+ MESSAGE(STATUS "Checking _FILE_OFFSET_BITS for large files")
+ TRY_COMPILE(__WITHOUT_FILE_OFFSET_BITS_64
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${_selfdir_CheckFileOffsetBits}/CheckFileOffsetBits.c
+ COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS})
+ IF(NOT __WITHOUT_FILE_OFFSET_BITS_64)
+ TRY_COMPILE(__WITH_FILE_OFFSET_BITS_64
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${_selfdir_CheckFileOffsetBits}/CheckFileOffsetBits.c
+ COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_FILE_OFFSET_BITS=64)
+ ENDIF(NOT __WITHOUT_FILE_OFFSET_BITS_64)
+
+ IF(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64)
+ SET(_FILE_OFFSET_BITS 64 CACHE INTERNAL "_FILE_OFFSET_BITS macro needed for large files")
+ MESSAGE(STATUS "Checking _FILE_OFFSET_BITS for large files - needed")
+ ELSE(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64)
+ SET(_FILE_OFFSET_BITS "" CACHE INTERNAL "_FILE_OFFSET_BITS macro needed for large files")
+ MESSAGE(STATUS "Checking _FILE_OFFSET_BITS for large files - not needed")
+ ENDIF(NOT __WITHOUT_FILE_OFFSET_BITS_64 AND __WITH_FILE_OFFSET_BITS_64)
+ ENDIF(NOT DEFINED _FILE_OFFSET_BITS)
+
+ENDMACRO (CHECK_FILE_OFFSET_BITS)
diff --git a/libs/libevent/docs/cmake/CheckFunctionExistsEx.c b/libs/libevent/docs/cmake/CheckFunctionExistsEx.c
new file mode 100644
index 0000000000..5ee3e5913a
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckFunctionExistsEx.c
@@ -0,0 +1,30 @@
+#ifdef CHECK_FUNCTION_EXISTS
+
+#ifndef _WIN32
+char CHECK_FUNCTION_EXISTS();
+#endif
+
+#ifdef __CLASSIC_C__
+int main(){
+ int ac;
+ char*av[];
+#else
+int main(int ac, char*av[]){
+#endif
+#ifdef _WIN32
+ void * p = &CHECK_FUNCTION_EXISTS;
+#else
+ CHECK_FUNCTION_EXISTS();
+#endif
+ if(ac > 1000)
+ {
+ return *av[0];
+ }
+ return 0;
+}
+
+#else /* CHECK_FUNCTION_EXISTS */
+
+# error "CHECK_FUNCTION_EXISTS has to specify the function"
+
+#endif /* CHECK_FUNCTION_EXISTS */
diff --git a/libs/libevent/docs/cmake/CheckFunctionExistsEx.cmake b/libs/libevent/docs/cmake/CheckFunctionExistsEx.cmake
new file mode 100644
index 0000000000..f513f4e108
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckFunctionExistsEx.cmake
@@ -0,0 +1,69 @@
+# - Check if a C function can be linked
+# CHECK_FUNCTION_EXISTS_EX(<function> <variable>)
+#
+# Check that the <function> is provided by libraries on the system and
+# store the result in a <variable>. This does not verify that any
+# system header file declares the function, only that it can be found
+# at link time (consider using CheckSymbolExists).
+#
+# The following variables may be set before calling this macro to
+# modify the way the check is run:
+#
+# CMAKE_REQUIRED_FLAGS = string of compile command line flags
+# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar)
+# CMAKE_REQUIRED_INCLUDES = list of include directories
+# CMAKE_REQUIRED_LIBRARIES = list of libraries to link
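+#
+# Illustrative usage (hypothetical result variable):
+#   CHECK_FUNCTION_EXISTS_EX(kqueue HAVE_KQUEUE)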
+
+#=============================================================================
+# Copyright 2002-2011 Kitware, Inc.
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+MACRO(CHECK_FUNCTION_EXISTS_EX FUNCTION VARIABLE)
+ IF("${VARIABLE}" MATCHES "^${VARIABLE}$")
+ SET(MACRO_CHECK_FUNCTION_DEFINITIONS
+ "-DCHECK_FUNCTION_EXISTS=${FUNCTION} ${CMAKE_REQUIRED_FLAGS}")
+ MESSAGE(STATUS "Looking for ${FUNCTION}")
+ IF(CMAKE_REQUIRED_LIBRARIES)
+ SET(CHECK_FUNCTION_EXISTS_ADD_LIBRARIES
+ "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
+ ELSE(CMAKE_REQUIRED_LIBRARIES)
+ SET(CHECK_FUNCTION_EXISTS_ADD_LIBRARIES)
+ ENDIF(CMAKE_REQUIRED_LIBRARIES)
+ IF(CMAKE_REQUIRED_INCLUDES)
+ SET(CHECK_FUNCTION_EXISTS_ADD_INCLUDES
+ "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}")
+ ELSE(CMAKE_REQUIRED_INCLUDES)
+ SET(CHECK_FUNCTION_EXISTS_ADD_INCLUDES)
+ ENDIF(CMAKE_REQUIRED_INCLUDES)
+ TRY_COMPILE(${VARIABLE}
+ ${CMAKE_BINARY_DIR}
+ ${PROJECT_SOURCE_DIR}/cmake/CheckFunctionExistsEx.c
+ COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS}
+ CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
+ "${CHECK_FUNCTION_EXISTS_ADD_LIBRARIES}"
+ "${CHECK_FUNCTION_EXISTS_ADD_INCLUDES}"
+ OUTPUT_VARIABLE OUTPUT)
+ IF(${VARIABLE})
+ SET(${VARIABLE} 1 CACHE INTERNAL "Have function ${FUNCTION}")
+ MESSAGE(STATUS "Looking for ${FUNCTION} - found")
+ FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+ "Determining if the function ${FUNCTION} exists passed with the following output:\n"
+ "${OUTPUT}\n\n")
+ ELSE(${VARIABLE})
+ MESSAGE(STATUS "Looking for ${FUNCTION} - not found")
+ SET(${VARIABLE} "" CACHE INTERNAL "Have function ${FUNCTION}")
+ FILE(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "Determining if the function ${FUNCTION} exists failed with the following output:\n"
+ "${OUTPUT}\n\n")
+ ENDIF(${VARIABLE})
+ ENDIF("${VARIABLE}" MATCHES "^${VARIABLE}$")
+ENDMACRO(CHECK_FUNCTION_EXISTS_EX)
diff --git a/libs/libevent/docs/cmake/CheckFunctionKeywords.cmake b/libs/libevent/docs/cmake/CheckFunctionKeywords.cmake
new file mode 100644
index 0000000000..3d968b8a6b
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckFunctionKeywords.cmake
@@ -0,0 +1,14 @@
+include(CheckCSourceCompiles)
+
+macro(check_function_keywords _wordlist)
+ set(${_result} "")
+ foreach(flag ${_wordlist})
+ string(REGEX REPLACE "[-+/ ()]" "_" flagname "${flag}")
+ string(TOUPPER "${flagname}" flagname)
+ set(have_flag "HAVE_${flagname}")
+ check_c_source_compiles("${flag} void func(); void func() { } int main() { func(); return 0; }" ${have_flag})
+ if(${have_flag} AND NOT ${_result})
+ set(${_result} "${flag}")
+ endif(${have_flag} AND NOT ${_result})
+ endforeach(flag)
+endmacro(check_function_keywords)
diff --git a/libs/libevent/docs/cmake/CheckPrototypeDefinition.c.in b/libs/libevent/docs/cmake/CheckPrototypeDefinition.c.in
new file mode 100644
index 0000000000..a97344ac3e
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckPrototypeDefinition.c.in
@@ -0,0 +1,29 @@
+@CHECK_PROTOTYPE_DEFINITION_HEADER@
+
+static void cmakeRequireSymbol(int dummy, ...) {
+ (void) dummy;
+}
+
+static void checkSymbol(void) {
+#ifndef @CHECK_PROTOTYPE_DEFINITION_SYMBOL@
+ cmakeRequireSymbol(0, &@CHECK_PROTOTYPE_DEFINITION_SYMBOL@);
+#endif
+}
+
+@CHECK_PROTOTYPE_DEFINITION_PROTO@ {
+ return @CHECK_PROTOTYPE_DEFINITION_RETURN@;
+}
+
+#ifdef __CLASSIC_C__
+int main() {
+ int ac;
+ char*av[];
+#else
+int main(int ac, char *av[]) {
+#endif
+ checkSymbol();
+ if (ac > 1000) {
+ return *av[0];
+ }
+ return 0;
+}
diff --git a/libs/libevent/docs/cmake/CheckPrototypeDefinition.cmake b/libs/libevent/docs/cmake/CheckPrototypeDefinition.cmake
new file mode 100644
index 0000000000..e0c6a572c0
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckPrototypeDefinition.cmake
@@ -0,0 +1,84 @@
+# - Check if the prototype we expect is correct.
+# check_prototype_definition(FUNCTION PROTOTYPE RETURN HEADER VARIABLE)
+#
+# FUNCTION - The name of the function (used to check if prototype exists)
+# PROTOTYPE- The prototype to check.
+# RETURN - The return value of the function.
+# HEADER - The header files required.
+# VARIABLE - The variable to store the result.
+#
+# Example:
+#
+# check_prototype_definition(getpwent_r
+# "struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)"
+# "NULL"
+# "unistd.h;pwd.h"
+# SOLARIS_GETPWENT_R)
+#
+# The following variables may be set before calling this macro to
+# modify the way the check is run:
+#
+# CMAKE_REQUIRED_FLAGS = string of compile command line flags
+# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar)
+# CMAKE_REQUIRED_INCLUDES = list of include directories
+# CMAKE_REQUIRED_LIBRARIES = list of libraries to link
+
+
+function(CHECK_PROTOTYPE_DEFINITION _FUNCTION _PROTOTYPE _RETURN _HEADER _VARIABLE)
+
+ if ("${_VARIABLE}" MATCHES "^${_VARIABLE}$")
+ set(CHECK_PROTOTYPE_DEFINITION_CONTENT "/* */\n")
+
+ set(CHECK_PROTOTYPE_DEFINITION_FLAGS ${CMAKE_REQUIRED_FLAGS})
+ if (CMAKE_REQUIRED_LIBRARIES)
+ set(CHECK_PROTOTYPE_DEFINITION_LIBS
+ "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
+ else(CMAKE_REQUIRED_LIBRARIES)
+ set(CHECK_PROTOTYPE_DEFINITION_LIBS)
+ endif(CMAKE_REQUIRED_LIBRARIES)
+ if (CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_SYMBOL_EXISTS_INCLUDES
+ "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}")
+ else(CMAKE_REQUIRED_INCLUDES)
+ set(CMAKE_SYMBOL_EXISTS_INCLUDES)
+ endif(CMAKE_REQUIRED_INCLUDES)
+
+ foreach(_FILE ${_HEADER})
+ set(CHECK_PROTOTYPE_DEFINITION_HEADER
+ "${CHECK_PROTOTYPE_DEFINITION_HEADER}#include <${_FILE}>\n")
+ endforeach(_FILE)
+
+ set(CHECK_PROTOTYPE_DEFINITION_SYMBOL ${_FUNCTION})
+ set(CHECK_PROTOTYPE_DEFINITION_PROTO ${_PROTOTYPE})
+ set(CHECK_PROTOTYPE_DEFINITION_RETURN ${_RETURN})
+
+ configure_file("${PROJECT_SOURCE_DIR}/cmake/CheckPrototypeDefinition.c.in"
+ "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c" @ONLY)
+
+ file(READ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c _SOURCE)
+
+ try_compile(${_VARIABLE}
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/CheckPrototypeDefinition.c
+ COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS}
+ CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${CHECK_PROTOTYPE_DEFINITION_FLAGS}
+ "${CHECK_PROTOTYPE_DEFINITION_LIBS}"
+ "${CMAKE_SYMBOL_EXISTS_INCLUDES}"
+ OUTPUT_VARIABLE OUTPUT)
+
+ if (${_VARIABLE})
+ set(${_VARIABLE} 1 CACHE INTERNAL "Have correct prototype for ${_FUNCTION}")
+ message(STATUS "Checking prototype ${_FUNCTION} for ${_VARIABLE} - True")
+ file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+ "Determining if the prototype ${_FUNCTION} exists for ${_VARIABLE} passed with the following output:\n"
+ "${OUTPUT}\n\n")
+ else (${_VARIABLE})
+ message(STATUS "Checking prototype ${_FUNCTION} for ${_VARIABLE} - False")
+ set(${_VARIABLE} 0 CACHE INTERNAL "Have correct prototype for ${_FUNCTION}")
+ file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "Determining if the prototype ${_FUNCTION} exists for ${_VARIABLE} failed with the following output:\n"
+ "${OUTPUT}\n\n${_SOURCE}\n\n")
+ endif (${_VARIABLE})
+ endif("${_VARIABLE}" MATCHES "^${_VARIABLE}$")
+
+endfunction(CHECK_PROTOTYPE_DEFINITION)
diff --git a/libs/libevent/docs/cmake/CheckWorkingKqueue.cmake b/libs/libevent/docs/cmake/CheckWorkingKqueue.cmake
new file mode 100644
index 0000000000..47bf4e838a
--- /dev/null
+++ b/libs/libevent/docs/cmake/CheckWorkingKqueue.cmake
@@ -0,0 +1,52 @@
+include(CheckCSourceRuns)
+
+check_c_source_runs(
+"
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/event.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+main(int argc, char **argv)
+{
+ int kq;
+ int n;
+ int fd[2];
+ struct kevent ev;
+ struct timespec ts;
+ char buf[8000];
+
+ if (pipe(fd) == -1)
+ exit(1);
+ if (fcntl(fd[1], F_SETFL, O_NONBLOCK) == -1)
+ exit(1);
+
+ while ((n = write(fd[1], buf, sizeof(buf))) == sizeof(buf))
+ ;
+
+ if ((kq = kqueue()) == -1)
+ exit(1);
+
+ memset(&ev, 0, sizeof(ev));
+ ev.ident = fd[1];
+ ev.filter = EVFILT_WRITE;
+ ev.flags = EV_ADD | EV_ENABLE;
+ n = kevent(kq, &ev, 1, NULL, 0, NULL);
+ if (n == -1)
+ exit(1);
+
+ read(fd[0], buf, sizeof(buf));
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ n = kevent(kq, NULL, 0, &ev, 1, &ts);
+ if (n == -1 || n == 0)
+ exit(1);
+
+ exit(0);
+}
+
+" EVENT__HAVE_WORKING_KQUEUE) \ No newline at end of file
diff --git a/libs/libevent/docs/cmake/CodeCoverage.cmake b/libs/libevent/docs/cmake/CodeCoverage.cmake
new file mode 100644
index 0000000000..969f273253
--- /dev/null
+++ b/libs/libevent/docs/cmake/CodeCoverage.cmake
@@ -0,0 +1,162 @@
+#
+# Boost Software License - Version 1.0 - August 17th, 2003
+#
+# Permission is hereby granted, free of charge, to any person or organization
+# obtaining a copy of the software and accompanying documentation covered by
+# this license (the "Software") to use, reproduce, display, distribute,
+# execute, and transmit the Software, and to prepare derivative works of the
+# Software, and to permit third-parties to whom the Software is furnished to
+# do so, all subject to the following:
+#
+# The copyright notices in the Software and this entire statement, including
+# the above license grant, this restriction and the following disclaimer,
+# must be included in all copies of the Software, in whole or in part, and
+# all derivative works of the Software, unless such copies or derivative
+# works are solely in the form of machine-executable object code generated by
+# a source language processor.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+# 2012-01-31, Lars Bilke
+# - Enable Code Coverage
+#
+# 2013-09-17, Joakim Söderberg
+# - Added support for Clang.
+# - Some additional usage instructions.
+#
+# USAGE:
+# 1. Copy this file into your cmake modules path.
+#
+# 2. Add the following line to your CMakeLists.txt:
+# INCLUDE(CodeCoverage)
+#
+# 3. Set compiler flags to turn off optimization and enable coverage:
+# SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage")
+# SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage")
+#
+# 4. Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target
+# which runs your test executable and produces a lcov code coverage report:
+# Example:
+# SETUP_TARGET_FOR_COVERAGE(
+# my_coverage_target # Name for custom target.
+# test_driver # Name of the test driver executable that runs the tests.
+# # NOTE! This should always have a ZERO as exit code
+# # otherwise the coverage generation will not complete.
+# coverage # Name of output directory.
+# )
+#
+# 5. Build a Debug build:
+# cmake -DCMAKE_BUILD_TYPE=Debug ..
+# make
+# make my_coverage_target
+#
+#
+
+# Check prereqs
+FIND_PROGRAM( GCOV_PATH gcov )
+FIND_PROGRAM( LCOV_PATH lcov )
+FIND_PROGRAM( GENHTML_PATH genhtml )
+FIND_PROGRAM( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/tests)
+
+IF(NOT GCOV_PATH)
+ MESSAGE(FATAL_ERROR "gcov not found! Aborting...")
+ENDIF() # NOT GCOV_PATH
+
+IF(NOT CMAKE_COMPILER_IS_GNUCC AND NOT CMAKE_COMPILER_IS_GNUCXX)
+ # Clang version 3.0.0 and greater now supports gcov as well.
+ MESSAGE(WARNING "Compiler is not GNU gcc! Clang Version 3.0.0 and greater supports gcov as well, but older versions don't.")
+
+ IF(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ MESSAGE(FATAL_ERROR "Compiler is not GNU gcc! Aborting...")
+ ENDIF()
+ENDIF() # NOT CMAKE_COMPILER_IS_GNUCC
+
+IF ( NOT CMAKE_BUILD_TYPE STREQUAL "Debug" )
+ MESSAGE( WARNING "Code coverage results with an optimized (non-Debug) build may be misleading" )
+ENDIF() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug"
+
+
+# Param _targetname The name of the new custom make target
+# Param _testrunner The name of the target which runs the tests.
+# MUST return ZERO always, even on errors.
+# If not, no coverage report will be created!
+# Param _outputname lcov output is generated as _outputname.info
+# HTML report is generated in _outputname/index.html
+# Optional fourth parameter is passed as arguments to _testrunner
+# Pass them in list form, e.g.: "-j;2" for -j 2
+FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _testrunner _outputname)
+
+ IF(NOT LCOV_PATH)
+ MESSAGE(FATAL_ERROR "lcov not found! Aborting...")
+ ENDIF() # NOT LCOV_PATH
+
+ IF(NOT GENHTML_PATH)
+ MESSAGE(FATAL_ERROR "genhtml not found! Aborting...")
+ ENDIF() # NOT GENHTML_PATH
+
+ # Setup target
+ ADD_CUSTOM_TARGET(${_targetname}
+
+ # Cleanup lcov
+ ${LCOV_PATH} --directory . --zerocounters
+
+ # Run tests
+ COMMAND ${_testrunner} ${ARGV3}
+
+ # Capturing lcov counters and generating report
+ COMMAND ${LCOV_PATH} --directory . --capture --output-file ${_outputname}.info
+ COMMAND ${LCOV_PATH} --remove ${_outputname}.info 'tests/*' '/usr/*' --output-file ${_outputname}.info.cleaned
+ COMMAND ${GENHTML_PATH} -o ${_outputname} ${_outputname}.info.cleaned
+ COMMAND ${CMAKE_COMMAND} -E remove ${_outputname}.info ${_outputname}.info.cleaned
+
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report."
+ )
+
+ # Show info where to find the report
+ ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD
+ COMMAND ;
+ COMMENT "Open ./${_outputname}/index.html in your browser to view the coverage report."
+ )
+
+ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE
+
+# Param _targetname The name of the new custom make target
+# Param _testrunner The name of the target which runs the tests
+# Param _outputname cobertura output is generated as _outputname.xml
+# Optional fourth parameter is passed as arguments to _testrunner
+# Pass them in list form, e.g.: "-j;2" for -j 2
+FUNCTION(SETUP_TARGET_FOR_COVERAGE_COBERTURA _targetname _testrunner _outputname)
+
+ IF(NOT PYTHON_EXECUTABLE)
+ MESSAGE(FATAL_ERROR "Python not found! Aborting...")
+ ENDIF() # NOT PYTHON_EXECUTABLE
+
+ IF(NOT GCOVR_PATH)
+ MESSAGE(FATAL_ERROR "gcovr not found! Aborting...")
+ ENDIF() # NOT GCOVR_PATH
+
+ ADD_CUSTOM_TARGET(${_targetname}
+
+ # Run tests
+ ${_testrunner} ${ARGV3}
+
+ # Running gcovr
+ COMMAND ${GCOVR_PATH} -x -r ${CMAKE_SOURCE_DIR} -e '${CMAKE_SOURCE_DIR}/tests/' -o ${_outputname}.xml
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "Running gcovr to produce Cobertura code coverage report."
+ )
+
+ # Show info where to find the report
+ ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD
+ COMMAND ;
+ COMMENT "Cobertura code coverage report saved in ${_outputname}.xml."
+ )
+
+ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE_COBERTURA
diff --git a/libs/libevent/docs/cmake/Copyright.txt b/libs/libevent/docs/cmake/Copyright.txt
new file mode 100644
index 0000000000..813124f02e
--- /dev/null
+++ b/libs/libevent/docs/cmake/Copyright.txt
@@ -0,0 +1,57 @@
+CMake - Cross Platform Makefile Generator
+Copyright 2000-2013 Kitware, Inc.
+Copyright 2000-2011 Insight Software Consortium
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the names of Kitware, Inc., the Insight Software Consortium,
+ nor the names of their contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------------------------------------------------------------------
+
+The above copyright and license notice applies to distributions of
+CMake in source and binary form. Some source files contain additional
+notices of original copyright by their contributors; see each source
+for details. Third-party software packages supplied with CMake under
+compatible licenses provide their own copyright notices documented in
+corresponding subdirectories.
+
+------------------------------------------------------------------------------
+
+CMake was initially developed by Kitware with the following sponsorship:
+
+ * National Library of Medicine at the National Institutes of Health
+ as part of the Insight Segmentation and Registration Toolkit (ITK).
+
+ * US National Labs (Los Alamos, Livermore, Sandia) ASC Parallel
+ Visualization Initiative.
+
+ * National Alliance for Medical Image Computing (NAMIC) is funded by the
+ National Institutes of Health through the NIH Roadmap for Medical Research,
+ Grant U54 EB005149.
+
+ * Kitware, Inc.
\ No newline at end of file
diff --git a/libs/libevent/docs/cmake/FindGit.cmake b/libs/libevent/docs/cmake/FindGit.cmake
new file mode 100644
index 0000000000..2abbfe4e9d
--- /dev/null
+++ b/libs/libevent/docs/cmake/FindGit.cmake
@@ -0,0 +1,45 @@
+# The module defines the following variables:
+# GIT_EXECUTABLE - path to git command line client
+# GIT_FOUND - true if the command line client was found
+# Example usage:
+# find_package(Git)
+# if(GIT_FOUND)
+# message("git found: ${GIT_EXECUTABLE}")
+# endif()
+
+#=============================================================================
+# Copyright 2010 Kitware, Inc.
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+# Look for 'git' or 'eg' (easy git)
+set(git_names git eg)
+
+# Prefer .cmd variants on Windows unless running in a Makefile
+# in the MSYS shell.
+if(WIN32)
+ if(NOT CMAKE_GENERATOR MATCHES "MSYS")
+ set(git_names git.cmd git eg.cmd eg)
+ endif()
+endif()
+
+find_program(GIT_EXECUTABLE
+ NAMES ${git_names}
+ DOC "git command line client")
+
+mark_as_advanced(GIT_EXECUTABLE)
+
+# Handle the QUIETLY and REQUIRED arguments and set GIT_FOUND to TRUE if
+# all listed variables are TRUE
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Git DEFAULT_MSG GIT_EXECUTABLE)
+
diff --git a/libs/libevent/docs/cmake/LibeventConfig.cmake.in b/libs/libevent/docs/cmake/LibeventConfig.cmake.in
new file mode 100644
index 0000000000..b28cacb5fb
--- /dev/null
+++ b/libs/libevent/docs/cmake/LibeventConfig.cmake.in
@@ -0,0 +1,17 @@
+# - Config file for the Libevent package
+# It defines the following variables
+# LIBEVENT_INCLUDE_DIRS - include directories for Libevent
+# LIBEVENT_LIBRARIES - libraries to link against
+
+# Get the path of the current file.
+get_filename_component(LIBEVENT_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+# Set the include directories.
+set(LIBEVENT_INCLUDE_DIRS "@EVENT_INSTALL_INCLUDE_DIR@")
+
+# Include the project Targets file, this contains definitions for IMPORTED targets.
+include(${LIBEVENT_CMAKE_DIR}/LibeventTargets.cmake)
+
+# IMPORTED targets from LibeventTargets.cmake
+set(LIBEVENT_LIBRARIES event event_core event_extra)
+
diff --git a/libs/libevent/docs/cmake/LibeventConfigBuildTree.cmake.in b/libs/libevent/docs/cmake/LibeventConfigBuildTree.cmake.in
new file mode 100644
index 0000000000..02edef32fc
--- /dev/null
+++ b/libs/libevent/docs/cmake/LibeventConfigBuildTree.cmake.in
@@ -0,0 +1,17 @@
+# - Config file for the Libevent package
+# It defines the following variables
+# LIBEVENT_INCLUDE_DIRS - include directories for Libevent
+# LIBEVENT_LIBRARIES - libraries to link against
+
+# Get the path of the current file.
+get_filename_component(LIBEVENT_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+# Set the include directories.
+set(LIBEVENT_INCLUDE_DIRS "@EVENT__INCLUDE_DIRS@")
+
+# Include the project Targets file, this contains definitions for IMPORTED targets.
+include(${LIBEVENT_CMAKE_DIR}/LibeventTargets.cmake)
+
+# IMPORTED targets from LibeventTargets.cmake
+set(LIBEVENT_LIBRARIES event event_core event_extra)
+
diff --git a/libs/libevent/docs/cmake/LibeventConfigVersion.cmake.in b/libs/libevent/docs/cmake/LibeventConfigVersion.cmake.in
new file mode 100644
index 0000000000..56371a8fee
--- /dev/null
+++ b/libs/libevent/docs/cmake/LibeventConfigVersion.cmake.in
@@ -0,0 +1,11 @@
+set(PACKAGE_VERSION "@EVENT_PACKAGE_VERSION@")
+
+# Check whether the requested PACKAGE_FIND_VERSION is compatible
+if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
+ set(PACKAGE_VERSION_COMPATIBLE FALSE)
+else()
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
+ set(PACKAGE_VERSION_EXACT TRUE)
+ endif()
+endif()
diff --git a/libs/libevent/docs/cmake/VersionViaGit.cmake b/libs/libevent/docs/cmake/VersionViaGit.cmake
new file mode 100644
index 0000000000..f183a64713
--- /dev/null
+++ b/libs/libevent/docs/cmake/VersionViaGit.cmake
@@ -0,0 +1,53 @@
+# This module defines the following variables by using
+# git to determine the nearest parent tag. If a tag is found, the macro
+# attempts to parse it in the GitHub tag format.
+#
+# Useful for auto-versioning in our CMakeLists
+#
+# EVENT_GIT___VERSION_FOUND - Version variables found
+# EVENT_GIT___VERSION_MAJOR - Major version
+# EVENT_GIT___VERSION_MINOR - Minor version
+# EVENT_GIT___VERSION_PATCH - Patch version
+# EVENT_GIT___VERSION_STAGE - Stage version
+#
+# Example usage:
+#
+# event_fuzzy_version_from_git()
+# if (EVENT_GIT___VERSION_FOUND)
+# message("Libvent major=${EVENT_GIT___VERSION_MAJOR}")
+# message(" minor=${EVENT_GIT___VERSION_MINOR}")
+# message(" patch=${EVENT_GIT___VERSION_PATCH}")
+# message(" stage=${EVENT_GIT___VERSION_STAGE}")
+# endif()
+
+include(FindGit)
+
+macro(event_fuzzy_version_from_git)
+ set(EVENT_GIT___VERSION_FOUND FALSE)
+
+ # set our defaults.
+ set(EVENT_GIT___VERSION_MAJOR 2)
+ set(EVENT_GIT___VERSION_MINOR 1)
+ set(EVENT_GIT___VERSION_PATCH 5)
+ set(EVENT_GIT___VERSION_STAGE "beta")
+
+ find_package(Git)
+
+ if (GIT_FOUND)
+ execute_process(
+ COMMAND
+ ${GIT_EXECUTABLE} describe --abbrev=0
+ WORKING_DIRECTORY
+ ${PROJECT_SOURCE_DIR}
+ RESULT_VARIABLE
+ GITRET
+ OUTPUT_VARIABLE
+ GITVERSION)
+
+ if (GITRET EQUAL 0)
+ string(REGEX REPLACE "^release-([0-9]+)\\.([0-9]+)\\.([0-9]+)-(.*)" "\\1" EVENT_GIT___VERSION_MAJOR ${GITVERSION})
+ string(REGEX REPLACE "^release-([0-9]+)\\.([0-9]+)\\.([0-9]+)-(.*)" "\\2" EVENT_GIT___VERSION_MINOR ${GITVERSION})
+ string(REGEX REPLACE "^release-([0-9]+)\\.([0-9]+)\\.([0-9]+)-(.*)" "\\3" EVENT_GIT___VERSION_PATCH ${GITVERSION})
+ string(REGEX REPLACE "^release-([0-9]+)\\.([0-9]+)\\.([0-9]+)-([aA-zZ]+)" "\\4" EVENT_GIT___VERSION_STAGE ${GITVERSION})
+ endif()
+ endif()
+endmacro()
diff --git a/libs/libevent/docs/configure.ac b/libs/libevent/docs/configure.ac
new file mode 100644
index 0000000000..6a669fb701
--- /dev/null
+++ b/libs/libevent/docs/configure.ac
@@ -0,0 +1,919 @@
+dnl Copyright 2000-2007 Niels Provos
+dnl Copyright 2007-2012 Niels Provos and Nick Mathewson
+dnl
+dnl See LICENSE for copying information.
+dnl
+dnl Original version Dug Song <dugsong@monkey.org>
+
+AC_INIT(libevent,2.1.5-beta)
+AC_PREREQ(2.59)
+AC_CONFIG_SRCDIR(event.c)
+
+AC_CONFIG_MACRO_DIR([m4])
+AM_INIT_AUTOMAKE
+dnl AM_SILENT_RULES requires automake 1.11; passing [no] would default to verbose output (V=1)
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AC_CONFIG_HEADERS(config.h evconfig-private.h:evconfig-private.h.in)
+AC_DEFINE(NUMERIC_VERSION, 0x02010500, [Numeric representation of the version])
+
+dnl Initialize prefix.
+if test "$prefix" = "NONE"; then
+ prefix="/usr/local"
+fi
+
+dnl Try and get a full POSIX environment on obscure systems
+ifdef([AC_USE_SYSTEM_EXTENSIONS], [
+AC_USE_SYSTEM_EXTENSIONS
+], [
+AC_AIX
+AC_GNU_SOURCE
+AC_MINIX
+])
+
+AC_CANONICAL_BUILD
+AC_CANONICAL_HOST
+dnl the 'build' machine is where we run configure and compile
+dnl the 'host' machine is where the resulting stuff runs.
+
+#case "$host_os" in
+#
+# osf5*)
+# CFLAGS="$CFLAGS -D_OSF_SOURCE"
+# ;;
+#esac
+
+dnl Checks for programs.
+AM_PROG_CC_C_O
+AC_PROG_INSTALL
+AC_PROG_LN_S
+# AC_PROG_MKDIR_P - $(MKDIR_P) should be defined by AM_INIT_AUTOMAKE
+
+# AC_PROG_SED is only available in Autoconf >= 2.59b; workaround for older
+# versions
+ifdef([AC_PROG_SED], [AC_PROG_SED], [
+AC_CHECK_PROGS(SED, [gsed sed])
+])
+
+AC_PROG_GCC_TRADITIONAL
+
+# We need to test for at least gcc 2.95 here, because older versions don't
+# have -fno-strict-aliasing
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 95)
+#error
+#endif])], have_gcc295=yes, have_gcc295=no)
+
+if test "$GCC" = "yes" ; then
+ # Enable many gcc warnings by default...
+ CFLAGS="$CFLAGS -Wall"
+ # And disable the strict-aliasing optimization, since it breaks
+ # our sockaddr-handling code in strange ways.
+ if test x$have_gcc295 = xyes; then
+ CFLAGS="$CFLAGS -fno-strict-aliasing"
+ fi
+fi
+
+# OS X Lion started deprecating the system openssl. Let's just disable
+# all deprecation warnings on OS X.
+case "$host_os" in
+
+ darwin*)
+ CFLAGS="$CFLAGS -Wno-deprecated-declarations"
+ ;;
+esac
+
+AC_ARG_ENABLE(gcc-warnings,
+ AS_HELP_STRING(--disable-gcc-warnings, disable verbose warnings with GCC))
+
+AC_ARG_ENABLE(gcc-hardening,
+ AS_HELP_STRING(--enable-gcc-hardening, enable compiler security checks),
+[if test x$enableval = xyes; then
+ CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=2 -fstack-protector-all"
+ CFLAGS="$CFLAGS -fwrapv -fPIE -Wstack-protector"
+ CFLAGS="$CFLAGS --param ssp-buffer-size=1"
+fi])
+
+AC_ARG_ENABLE(thread-support,
+ AS_HELP_STRING(--disable-thread-support, disable support for threading),
+ [], [enable_thread_support=yes])
+AC_ARG_ENABLE(malloc-replacement,
+ AS_HELP_STRING(--disable-malloc-replacement, disable support for replacing the memory mgt functions),
+ [], [enable_malloc_replacement=yes])
+AC_ARG_ENABLE(openssl,
+ AS_HELP_STRING(--disable-openssl, disable support for openssl encryption),
+ [], [enable_openssl=yes])
+AC_ARG_ENABLE(debug-mode,
+ AS_HELP_STRING(--disable-debug-mode, disable support for running in debug mode),
+ [], [enable_debug_mode=yes])
+AC_ARG_ENABLE([libevent-install],
+ AS_HELP_STRING([--disable-libevent-install, disable installation of libevent]),
+ [], [enable_libevent_install=yes])
+AC_ARG_ENABLE([libevent-regress],
+ AS_HELP_STRING([--disable-libevent-regress, skip regress in make check]),
+ [], [enable_libevent_regress=yes])
+AC_ARG_ENABLE([samples],
+ AS_HELP_STRING([--disable-samples, skip building of sample programs]),
+ [], [enable_samples=yes])
+AC_ARG_ENABLE([function-sections],
+ AS_HELP_STRING([--enable-function-sections, make static library allow smaller binaries with --gc-sections]),
+ [], [enable_function_sections=no])
+AC_ARG_ENABLE([verbose-debug],
+ AS_HELP_STRING([--enable-verbose-debug, verbose debug logging]),
+ [], [enable_verbose_debug=no])
+
+
+AC_PROG_LIBTOOL
+
+dnl Uncomment "AC_DISABLE_SHARED" to make shared libraries not get
+dnl built by default. You can also turn shared libs on and off from
+dnl the command line with --enable-shared and --disable-shared.
+dnl AC_DISABLE_SHARED
+AC_SUBST(LIBTOOL_DEPS)
+
+AM_CONDITIONAL([BUILD_SAMPLES], [test "$enable_samples" = "yes"])
+AM_CONDITIONAL([BUILD_REGRESS], [test "$enable_libevent_regress" = "yes"])
+
+dnl Checks for libraries.
+AC_SEARCH_LIBS([inet_ntoa], [nsl])
+AC_SEARCH_LIBS([socket], [socket])
+AC_SEARCH_LIBS([inet_aton], [resolv])
+AC_SEARCH_LIBS([clock_gettime], [rt])
+AC_SEARCH_LIBS([sendfile], [sendfile])
+
+dnl - check if the macro _WIN32 is defined on this compiler.
+dnl - (this is how we check for a windows compiler)
+AC_MSG_CHECKING(for WIN32)
+AC_TRY_COMPILE(,
+ [
+#ifndef _WIN32
+die horribly
+#endif
+ ],
+ bwin32=true; AC_MSG_RESULT(yes),
+ bwin32=false; AC_MSG_RESULT(no),
+)
+
+dnl - check if the macro __CYGWIN__ is defined on this compiler.
+dnl - (this is how we check for a cygwin version of GCC)
+AC_MSG_CHECKING(for CYGWIN)
+AC_TRY_COMPILE(,
+ [
+#ifndef __CYGWIN__
+die horribly
+#endif
+ ],
+ cygwin=true; AC_MSG_RESULT(yes),
+ cygwin=false; AC_MSG_RESULT(no),
+)
+
+AC_CHECK_HEADERS([zlib.h])
+
+if test "x$ac_cv_header_zlib_h" = "xyes"; then
+dnl Determine if we have zlib for regression tests
+dnl Don't put this one in LIBS
+save_LIBS="$LIBS"
+LIBS=""
+ZLIB_LIBS=""
+have_zlib=no
+AC_SEARCH_LIBS([inflateEnd], [z],
+ [have_zlib=yes
+ ZLIB_LIBS="$LIBS"
+ AC_DEFINE(HAVE_LIBZ, 1, [Define if the system has zlib])])
+LIBS="$save_LIBS"
+AC_SUBST(ZLIB_LIBS)
+fi
+AM_CONDITIONAL(ZLIB_REGRESS, [test "$have_zlib" = "yes"])
+
+dnl See if we have openssl. This doesn't go in LIBS either.
+if test "$bwin32" = true; then
+ EV_LIB_WS32=-lws2_32
+ EV_LIB_GDI=-lgdi32
+else
+ EV_LIB_WS32=
+ EV_LIB_GDI=
+fi
+AC_SUBST(EV_LIB_WS32)
+AC_SUBST(EV_LIB_GDI)
+AC_SUBST(OPENSSL_LIBADD)
+
+AC_SYS_LARGEFILE
+
+LIBEVENT_OPENSSL
+
+dnl Checks for header files.
+AC_CHECK_HEADERS([ \
+ arpa/inet.h \
+ fcntl.h \
+ ifaddrs.h \
+ mach/mach_time.h \
+ netdb.h \
+ netinet/in.h \
+ netinet/in6.h \
+ netinet/tcp.h \
+ poll.h \
+ port.h \
+ stdarg.h \
+ stddef.h \
+ sys/devpoll.h \
+ sys/epoll.h \
+ sys/event.h \
+ sys/eventfd.h \
+ sys/ioctl.h \
+ sys/mman.h \
+ sys/param.h \
+ sys/queue.h \
+ sys/resource.h \
+ sys/select.h \
+ sys/sendfile.h \
+ sys/socket.h \
+ sys/stat.h \
+ sys/time.h \
+ sys/timerfd.h \
+ sys/uio.h \
+ sys/wait.h \
+])
+
+AC_CHECK_HEADERS(sys/sysctl.h, [], [], [
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+])
+if test "x$ac_cv_header_sys_queue_h" = "xyes"; then
+ AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/queue.h>
+#ifdef TAILQ_FOREACH
+ yes
+#endif
+], [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_TAILQFOREACH, 1,
+ [Define if TAILQ_FOREACH is defined in <sys/queue.h>])],
+ AC_MSG_RESULT(no)
+ )
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timeradd in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timeradd
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERADD, 1,
+ [Define if timeradd is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timercmp in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timercmp
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERCMP, 1,
+ [Define if timercmp is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timerclear in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerclear
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERCLEAR, 1,
+ [Define if timerclear is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timerisset in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerisset
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERISSET, 1,
+ [Define if timerisset is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_sysctl_h" = "xyes"; then
+ AC_CHECK_DECLS([CTL_KERN, KERN_RANDOM, RANDOM_UUID, KERN_ARND], [], [],
+ [[#include <sys/types.h>
+ #include <sys/sysctl.h>]]
+ )
+fi
+
+AM_CONDITIONAL(BUILD_WIN32, test x$bwin32 = xtrue)
+AM_CONDITIONAL(BUILD_CYGWIN, test x$cygwin = xtrue)
+AM_CONDITIONAL(BUILD_WITH_NO_UNDEFINED, test x$bwin32 = xtrue || test x$cygwin = xtrue)
+
+if test x$bwin32 = xtrue; then
+ AC_SEARCH_LIBS([getservbyname],[ws2_32])
+fi
+
+dnl Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_C_INLINE
+AC_HEADER_TIME
+
+dnl Checks for library functions.
+AC_CHECK_FUNCS([ \
+ accept4 \
+ arc4random \
+ arc4random_buf \
+ clock_gettime \
+ eventfd \
+ epoll_create1 \
+ fcntl \
+ getegid \
+ geteuid \
+ getifaddrs \
+ getnameinfo \
+ getprotobynumber \
+ gettimeofday \
+ inet_ntop \
+ inet_pton \
+ issetugid \
+ mach_absolute_time \
+ mmap \
+ nanosleep \
+ pipe \
+ pipe2 \
+ putenv \
+ sendfile \
+ setenv \
+ setrlimit \
+ sigaction \
+ signal \
+ splice \
+ strlcpy \
+ strsep \
+ strtok_r \
+ strtoll \
+ sysctl \
+ timerfd_create \
+ umask \
+ unsetenv \
+ usleep \
+ vasprintf \
+ getservbyname \
+])
+AM_CONDITIONAL(STRLCPY_IMPL, [test x"$ac_cv_func_strlcpy" = xno])
+
+AC_CACHE_CHECK(
+ [for getaddrinfo],
+ [libevent_cv_getaddrinfo],
+ [AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM(
+ [[
+ #ifdef HAVE_NETDB_H
+ #include <netdb.h>
+ #endif
+ ]],
+ [[
+ getaddrinfo;
+ ]]
+ )],
+ [libevent_cv_getaddrinfo=yes],
+ [libevent_cv_getaddrinfo=no]
+ )]
+)
+if test "$libevent_cv_getaddrinfo" = "yes" ; then
+ AC_DEFINE([HAVE_GETADDRINFO], [1], [Do we have getaddrinfo()?])
+else
+
+# Check for gethostbyname_r in all its glorious incompatible versions.
+# (This is cut-and-pasted from Tor, which based its logic on
+# Python's configure.in.)
+AH_TEMPLATE(HAVE_GETHOSTBYNAME_R,
+ [Define this if you have any gethostbyname_r()])
+
+AC_CHECK_FUNC(gethostbyname_r, [
+ AC_MSG_CHECKING([how many arguments gethostbyname_r() wants])
+ OLD_CFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS $MY_CPPFLAGS $MY_THREAD_CPPFLAGS $MY_CFLAGS"
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([
+#include <netdb.h>
+ ], [[
+ char *cp1, *cp2;
+ struct hostent *h1, *h2;
+ int i1, i2;
+ (void)gethostbyname_r(cp1,h1,cp2,i1,&h2,&i2);
+ ]])],[
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R_6_ARG, 1,
+ [Define this if gethostbyname_r takes 6 arguments])
+ AC_MSG_RESULT(6)
+ ], [
+ AC_TRY_COMPILE([
+#include <netdb.h>
+ ], [
+ char *cp1, *cp2;
+ struct hostent *h1;
+ int i1, i2;
+ (void)gethostbyname_r(cp1,h1,cp2,i1,&i2);
+ ], [
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R_5_ARG, 1,
+ [Define this if gethostbyname_r takes 5 arguments])
+ AC_MSG_RESULT(5)
+ ], [
+ AC_TRY_COMPILE([
+#include <netdb.h>
+ ], [
+ char *cp1;
+ struct hostent *h1;
+ struct hostent_data hd;
+ (void) gethostbyname_r(cp1,h1,&hd);
+ ], [
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+ AC_DEFINE(HAVE_GETHOSTBYNAME_R_3_ARG, 1,
+ [Define this if gethostbyname_r takes 3 arguments])
+ AC_MSG_RESULT(3)
+ ], [
+ AC_MSG_RESULT(0)
+ ])
+ ])
+ ])
+ CFLAGS=$OLD_CFLAGS
+])
+
+fi
+
+AC_MSG_CHECKING(for F_SETFD in fcntl.h)
+AC_EGREP_CPP(yes,
+[
+#define _GNU_SOURCE
+#include <fcntl.h>
+#ifdef F_SETFD
+yes
+#endif
+], [ AC_DEFINE(HAVE_SETFD, 1,
+ [Define if F_SETFD is defined in <fcntl.h>])
+ AC_MSG_RESULT(yes) ], AC_MSG_RESULT(no))
+
+needsignal=no
+haveselect=no
+if test x$bwin32 != xtrue; then
+ AC_CHECK_FUNCS(select, [haveselect=yes], )
+ if test "x$haveselect" = "xyes" ; then
+ needsignal=yes
+ fi
+fi
+AM_CONDITIONAL(SELECT_BACKEND, [test "x$haveselect" = "xyes"])
+
+havepoll=no
+AC_CHECK_FUNCS(poll, [havepoll=yes], )
+if test "x$havepoll" = "xyes" ; then
+ needsignal=yes
+fi
+AM_CONDITIONAL(POLL_BACKEND, [test "x$havepoll" = "xyes"])
+
+havedevpoll=no
+if test "x$ac_cv_header_sys_devpoll_h" = "xyes"; then
+ AC_DEFINE(HAVE_DEVPOLL, 1,
+ [Define if /dev/poll is available])
+fi
+AM_CONDITIONAL(DEVPOLL_BACKEND, [test "x$ac_cv_header_sys_devpoll_h" = "xyes"])
+
+havekqueue=no
+if test "x$ac_cv_header_sys_event_h" = "xyes"; then
+ AC_CHECK_FUNCS(kqueue, [havekqueue=yes], )
+ if test "x$havekqueue" = "xyes" ; then
+ AC_MSG_CHECKING(for working kqueue)
+ AC_TRY_RUN(
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/event.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+int
+main(int argc, char **argv)
+{
+ int kq;
+ int n;
+ int fd[[2]];
+ struct kevent ev;
+ struct timespec ts;
+ char buf[[8000]];
+
+ if (pipe(fd) == -1)
+ exit(1);
+ if (fcntl(fd[[1]], F_SETFL, O_NONBLOCK) == -1)
+ exit(1);
+
+ while ((n = write(fd[[1]], buf, sizeof(buf))) == sizeof(buf))
+ ;
+
+ if ((kq = kqueue()) == -1)
+ exit(1);
+
+ memset(&ev, 0, sizeof(ev));
+ ev.ident = fd[[1]];
+ ev.filter = EVFILT_WRITE;
+ ev.flags = EV_ADD | EV_ENABLE;
+ n = kevent(kq, &ev, 1, NULL, 0, NULL);
+ if (n == -1)
+ exit(1);
+
+ read(fd[[0]], buf, sizeof(buf));
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ n = kevent(kq, NULL, 0, &ev, 1, &ts);
+ if (n == -1 || n == 0)
+ exit(1);
+
+ exit(0);
+}, [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_WORKING_KQUEUE, 1,
+ [Define if kqueue works correctly with pipes])
+ havekqueue=yes
+ ], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+ fi
+fi
+AM_CONDITIONAL(KQUEUE_BACKEND, [test "x$havekqueue" = "xyes"])
+
+haveepollsyscall=no
+haveepoll=no
+AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes], )
+if test "x$haveepoll" = "xyes" ; then
+ AC_DEFINE(HAVE_EPOLL, 1,
+ [Define if your system supports the epoll system calls])
+ needsignal=yes
+fi
+if test "x$ac_cv_header_sys_epoll_h" = "xyes"; then
+ if test "x$haveepoll" = "xno" ; then
+ AC_MSG_CHECKING(for epoll system call)
+ AC_TRY_RUN(
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+int
+epoll_create(int size)
+{
+ return (syscall(__NR_epoll_create, size));
+}
+
+int
+main(int argc, char **argv)
+{
+ int epfd;
+
+ epfd = epoll_create(256);
+ exit (epfd == -1 ? 1 : 0);
+}, [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_EPOLL, 1,
+ [Define if your system supports the epoll system calls])
+ needsignal=yes
+ have_epoll=yes
+ AC_LIBOBJ(epoll_sub)
+ ], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+ fi
+fi
+AM_CONDITIONAL(EPOLL_BACKEND, [test "x$haveepoll" = "xyes"])
+
+haveeventports=no
+AC_CHECK_FUNCS(port_create, [haveeventports=yes], )
+if test "x$haveeventports" = "xyes" ; then
+ AC_DEFINE(HAVE_EVENT_PORTS, 1,
+ [Define if your system supports event ports])
+ needsignal=yes
+fi
+AM_CONDITIONAL(EVPORT_BACKEND, [test "x$haveeventports" = "xyes"])
+
+if test "x$bwin32" = "xtrue"; then
+ needsignal=yes
+fi
+
+AM_CONDITIONAL(SIGNAL_SUPPORT, [test "x$needsignal" = "xyes"])
+
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_TYPE_SSIZE_T
+
+AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t, uintptr_t], , ,
+[#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif])
+
+AC_CHECK_TYPES([fd_mask], , ,
+[#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif])
+
+AC_CHECK_SIZEOF(long long)
+AC_CHECK_SIZEOF(long)
+AC_CHECK_SIZEOF(int)
+AC_CHECK_SIZEOF(short)
+AC_CHECK_SIZEOF(size_t)
+AC_CHECK_SIZEOF(void *)
+AC_CHECK_SIZEOF(off_t)
+
+AC_CHECK_TYPES([struct in6_addr, struct sockaddr_in6, sa_family_t, struct addrinfo, struct sockaddr_storage], , ,
+[#define _GNU_SOURCE
+#include <sys/types.h>
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#ifdef _WIN32
+#define WIN32_WINNT 0x400
+#define _WIN32_WINNT 0x400
+#define WIN32_LEAN_AND_MEAN
+#if defined(_MSC_VER) && (_MSC_VER < 1300)
+#include <winsock.h>
+#else
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+#endif
+])
+AC_CHECK_MEMBERS([struct in6_addr.s6_addr32, struct in6_addr.s6_addr16, struct sockaddr_in.sin_len, struct sockaddr_in6.sin6_len, struct sockaddr_storage.ss_family, struct sockaddr_storage.__ss_family], , ,
+[#include <sys/types.h>
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef _WIN32
+#define WIN32_WINNT 0x400
+#define _WIN32_WINNT 0x400
+#define WIN32_LEAN_AND_MEAN
+#if defined(_MSC_VER) && (_MSC_VER < 1300)
+#include <winsock.h>
+#else
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+#endif
+])
+
+AC_CHECK_TYPES([struct so_linger],
+[#define HAVE_SO_LINGER], ,
+[
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+])
+
+AC_MSG_CHECKING([for socklen_t])
+AC_TRY_COMPILE([
+ #include <sys/types.h>
+ #include <sys/socket.h>],
+ [socklen_t x;],
+ AC_MSG_RESULT([yes]),
+ [AC_MSG_RESULT([no])
+ AC_DEFINE(socklen_t, unsigned int,
+	  [Define to unsigned int if you don't have it])]
+)
+
+AC_MSG_CHECKING([whether our compiler supports __func__])
+AC_TRY_COMPILE([],
+ [ const char *cp = __func__; ],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ AC_MSG_CHECKING([whether our compiler supports __FUNCTION__])
+ AC_TRY_COMPILE([],
+ [ const char *cp = __FUNCTION__; ],
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(__func__, __FUNCTION__,
+	 [Define to appropriate substitute if compiler doesn't have __func__]),
+ AC_MSG_RESULT([no])
+ AC_DEFINE(__func__, __FILE__,
+	 [Define to appropriate substitute if compiler doesn't have __func__])))
+
+
+# check if we can compile with pthreads
+have_pthreads=no
+if test x$bwin32 != xtrue && test "$enable_thread_support" != "no"; then
+ ACX_PTHREAD([
+ AC_DEFINE(HAVE_PTHREADS, 1,
+ [Define if we have pthreads on this system])
+ have_pthreads=yes])
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ AC_CHECK_SIZEOF(pthread_t, ,
+ [AC_INCLUDES_DEFAULT()
+ #include <pthread.h> ]
+ )
+fi
+AM_CONDITIONAL(PTHREADS, [test "$have_pthreads" != "no" && test "$enable_thread_support" != "no"])
+
+# check if we should compile locking into the library
+if test x$enable_thread_support = xno; then
+ AC_DEFINE(DISABLE_THREAD_SUPPORT, 1,
+ [Define if libevent should not be compiled with thread support])
+fi
+
+# check if we should hard-code the mm functions.
+if test x$enable_malloc_replacement = xno; then
+ AC_DEFINE(DISABLE_MM_REPLACEMENT, 1,
+ [Define if libevent should not allow replacing the mm functions])
+fi
+
+# check if we should hard-code debugging out
+if test x$enable_debug_mode = xno; then
+ AC_DEFINE(DISABLE_DEBUG_MODE, 1,
+ [Define if libevent should build without support for a debug mode])
+fi
+
+# check if we should enable verbose debugging
+if test x$enable_verbose_debug = xyes; then
+ CFLAGS="$CFLAGS -DUSE_DEBUG"
+fi
+
+# check if we have and should use openssl
+AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])
+if test "x$enable_openssl" = "xyes"; then
+ AC_SEARCH_LIBS([ERR_remove_thread_state], [crypto],
+    [AC_DEFINE(HAVE_ERR_REMOVE_THREAD_STATE, 1, [Define to 1 if you have ERR_remove_thread_state().])])
+fi
+
+# Add some more warnings which we use in development but not in the
+# released versions. (Some relevant gcc versions can't handle these.)
+if test x$enable_gcc_warnings != xno && test "$GCC" = "yes"; then
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4)
+#error
+#endif])], have_gcc4=yes, have_gcc4=no)
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+#error
+#endif])], have_gcc42=yes, have_gcc42=no)
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 5)
+#error
+#endif])], have_gcc45=yes, have_gcc45=no)
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#if !defined(__clang__)
+#error
+#endif])], have_clang=yes, have_clang=no)
+
+ CFLAGS="$CFLAGS -W -Wfloat-equal -Wundef -Wpointer-arith -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wredundant-decls -Wchar-subscripts -Wcomment -Wformat -Wwrite-strings -Wmissing-declarations -Wredundant-decls -Wnested-externs -Wbad-function-cast -Wswitch-enum"
+ if test x$enable_gcc_warnings = xyes; then
+ CFLAGS="$CFLAGS -Werror"
+ fi
+
+ CFLAGS="$CFLAGS -Wno-unused-parameter -Wstrict-aliasing"
+
+ if test x$have_gcc4 = xyes ; then
+ # These warnings break gcc 3.3.5 and work on gcc 4.0.2
+ CFLAGS="$CFLAGS -Winit-self -Wmissing-field-initializers -Wdeclaration-after-statement"
+ #CFLAGS="$CFLAGS -Wold-style-definition"
+ fi
+
+ if test x$have_gcc42 = xyes ; then
+ # These warnings break gcc 4.0.2 and work on gcc 4.2
+ CFLAGS="$CFLAGS -Waddress"
+ fi
+
+ if test x$have_gcc42 = xyes && test x$have_clang = xno; then
+ # These warnings break gcc 4.0.2 and clang, but work on gcc 4.2
+ CFLAGS="$CFLAGS -Wnormalized=id -Woverride-init"
+ fi
+
+ if test x$have_gcc45 = xyes ; then
+ # These warnings work on gcc 4.5
+ CFLAGS="$CFLAGS -Wlogical-op"
+ fi
+
+ if test x$have_clang = xyes; then
+ # Disable the unused-function warnings, because these trigger
+ # for minheap-internal.h related code.
+ CFLAGS="$CFLAGS -Wno-unused-function"
+
+		# clang on macosx emits warnings for each directory specified which
+		# isn't "used", generating a lot of build noise (typically 3 warnings
+		# per file).
+ case "$host_os" in
+ darwin*)
+ CFLAGS="$CFLAGS -Qunused-arguments"
+ ;;
+ esac
+ fi
+
+##This will break the world on some 64-bit architectures
+# CFLAGS="$CFLAGS -Winline"
+
+fi
+
+LIBEVENT_GC_SECTIONS=
+if test "$GCC" = yes && test "$enable_function_sections" = yes ; then
+ AC_CACHE_CHECK(
+ [if linker supports omitting unused code and data],
+ [libevent_cv_gc_sections_runs],
+ [
+ dnl NetBSD will link but likely not run with --gc-sections
+ dnl http://bugs.ntp.org/1844
+ dnl http://gnats.netbsd.org/40401
+ dnl --gc-sections causes attempt to load as linux elf, with
+ dnl wrong syscalls in place. Test a little gauntlet of
+ dnl simple stdio read code checking for errors, expecting
+ dnl enough syscall differences that the NetBSD code will
+ dnl fail even with Linux emulation working as designed.
+ dnl A shorter test could be refined by someone with access
+ dnl to a NetBSD host with Linux emulation working.
+ origCFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -Wl,--gc-sections"
+ AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM(
+ [[
+ #include <stdlib.h>
+ #include <stdio.h>
+ ]],
+ [[
+ FILE * fpC;
+ char buf[32];
+ size_t cch;
+ int read_success_once;
+
+ fpC = fopen("conftest.c", "r");
+ if (NULL == fpC)
+ exit(1);
+ do {
+ cch = fread(buf, sizeof(buf), 1, fpC);
+ read_success_once |= (0 != cch);
+ } while (0 != cch);
+ if (!read_success_once)
+ exit(2);
+ if (!feof(fpC))
+ exit(3);
+ if (0 != fclose(fpC))
+ exit(4);
+
+ exit(EXIT_SUCCESS);
+ ]]
+ )],
+ [
+ dnl We have to do this invocation manually so that we can
+ dnl get the output of conftest.err to make sure it doesn't
+ dnl mention gc-sections.
+ if test "X$cross_compiling" = "Xyes" || grep gc-sections conftest.err ; then
+ libevent_cv_gc_sections_runs=no
+ else
+ libevent_cv_gc_sections_runs=no
+ ./conftest >/dev/null 2>&1 && libevent_cv_gc_sections_runs=yes
+ fi
+ ],
+ [libevent_cv_gc_sections_runs=no]
+ )
+ CFLAGS="$origCFLAGS"
+ AS_UNSET([origCFLAGS])
+ ]
+ )
+ case "$libevent_cv_gc_sections_runs" in
+ yes)
+ CFLAGS="-ffunction-sections -fdata-sections $CFLAGS"
+ LIBEVENT_GC_SECTIONS="-Wl,--gc-sections"
+ ;;
+ esac
+fi
+AC_SUBST([LIBEVENT_GC_SECTIONS])
+
+AM_CONDITIONAL([INSTALL_LIBEVENT], [test "$enable_libevent_install" = "yes"])
+
+AC_CONFIG_FILES( [libevent.pc libevent_openssl.pc libevent_pthreads.pc] )
+AC_OUTPUT(Makefile)
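For reference, a minimal C sketch (not part of configure.ac or libevent) of how a caller could dispatch on the HAVE_GETHOSTBYNAME_R_6_ARG / _5_ARG / _3_ARG macros defined by the probes above. The lookup_host() helper, its signature, and the non-reentrant gethostbyname() fallback are illustrative assumptions:

#include <netdb.h>

/* Hypothetical helper: resolve `name` with whichever gethostbyname_r()
 * variant the configure probes detected. */
static struct hostent *
lookup_host(const char *name, struct hostent *ret, char *buf, int buflen, int *err)
{
#if defined(HAVE_GETHOSTBYNAME_R_6_ARG)
	/* glibc style: int return, result handed back via an out-pointer. */
	struct hostent *result = NULL;
	if (gethostbyname_r(name, ret, buf, buflen, &result, err) != 0)
		return NULL;
	return result;
#elif defined(HAVE_GETHOSTBYNAME_R_5_ARG)
	/* Solaris style: returns the hostent pointer directly. */
	return gethostbyname_r(name, ret, buf, buflen, err);
#elif defined(HAVE_GETHOSTBYNAME_R_3_ARG)
	/* AIX/HP-UX style: fills a caller-provided struct hostent_data. */
	struct hostent_data data;
	(void)buf; (void)buflen; (void)err;
	return gethostbyname_r(name, ret, &data) == 0 ? ret : NULL;
#else
	/* No reentrant variant found: fall back to plain gethostbyname(). */
	(void)ret; (void)buf; (void)buflen; (void)err;
	return gethostbyname(name);
#endif
}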
diff --git a/libs/libevent/docs/devpoll.c b/libs/libevent/docs/devpoll.c
new file mode 100644
index 0000000000..3a2f86d6f1
--- /dev/null
+++ b/libs/libevent/docs/devpoll.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2000-2009 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_DEVPOLL
+
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/devpoll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/thread.h"
+#include "event-internal.h"
+#include "evsignal-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "evthread-internal.h"
+
+struct devpollop {
+ struct pollfd *events;
+ int nevents;
+ int dpfd;
+ struct pollfd *changes;
+ int nchanges;
+};
+
+static void *devpoll_init(struct event_base *);
+static int devpoll_add(struct event_base *, int fd, short old, short events, void *);
+static int devpoll_del(struct event_base *, int fd, short old, short events, void *);
+static int devpoll_dispatch(struct event_base *, struct timeval *);
+static void devpoll_dealloc(struct event_base *);
+
+const struct eventop devpollops = {
+ "devpoll",
+ devpoll_init,
+ devpoll_add,
+ devpoll_del,
+ devpoll_dispatch,
+ devpoll_dealloc,
+ 1, /* need reinit */
+ EV_FEATURE_FDS|EV_FEATURE_O1,
+ 0
+};
+
+#define NEVENT 32000
+
+static int
+devpoll_commit(struct devpollop *devpollop)
+{
+ /*
+ * Due to a bug in Solaris, we have to use pwrite with an offset of 0.
+	 * Writes are limited to 2GB of data; anything larger will fail.
+ */
+ if (pwrite(devpollop->dpfd, devpollop->changes,
+ sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
+ return (-1);
+
+ devpollop->nchanges = 0;
+ return (0);
+}
+
+static int
+devpoll_queue(struct devpollop *devpollop, int fd, int events) {
+ struct pollfd *pfd;
+
+ if (devpollop->nchanges >= devpollop->nevents) {
+ /*
+ * Change buffer is full, must commit it to /dev/poll before
+ * adding more
+ */
+ if (devpoll_commit(devpollop) != 0)
+ return (-1);
+ }
+
+ pfd = &devpollop->changes[devpollop->nchanges++];
+ pfd->fd = fd;
+ pfd->events = events;
+ pfd->revents = 0;
+
+ return (0);
+}
+
+static void *
+devpoll_init(struct event_base *base)
+{
+ int dpfd, nfiles = NEVENT;
+ struct rlimit rl;
+ struct devpollop *devpollop;
+
+ if (!(devpollop = mm_calloc(1, sizeof(struct devpollop))))
+ return (NULL);
+
+ if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
+ rl.rlim_cur != RLIM_INFINITY)
+ nfiles = rl.rlim_cur;
+
+ /* Initialize the kernel queue */
+ if ((dpfd = evutil_open_closeonexec_("/dev/poll", O_RDWR, 0)) == -1) {
+ event_warn("open: /dev/poll");
+ mm_free(devpollop);
+ return (NULL);
+ }
+
+ devpollop->dpfd = dpfd;
+
+ /* Initialize fields */
+ /* FIXME: allocating 'nfiles' worth of space here can be
+ * expensive and unnecessary. See how epoll.c does it instead. */
+ devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd));
+ if (devpollop->events == NULL) {
+ mm_free(devpollop);
+ close(dpfd);
+ return (NULL);
+ }
+ devpollop->nevents = nfiles;
+
+ devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
+ if (devpollop->changes == NULL) {
+ mm_free(devpollop->events);
+ mm_free(devpollop);
+ close(dpfd);
+ return (NULL);
+ }
+
+ evsig_init_(base);
+
+ return (devpollop);
+}
+
+static int
+devpoll_dispatch(struct event_base *base, struct timeval *tv)
+{
+ struct devpollop *devpollop = base->evbase;
+ struct pollfd *events = devpollop->events;
+ struct dvpoll dvp;
+ int i, res, timeout = -1;
+
+ if (devpollop->nchanges)
+ devpoll_commit(devpollop);
+
+ if (tv != NULL)
+ timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+ dvp.dp_fds = devpollop->events;
+ dvp.dp_nfds = devpollop->nevents;
+ dvp.dp_timeout = timeout;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("ioctl: DP_POLL");
+ return (-1);
+ }
+
+ return (0);
+ }
+
+ event_debug(("%s: devpoll_wait reports %d", __func__, res));
+
+ for (i = 0; i < res; i++) {
+ int which = 0;
+ int what = events[i].revents;
+
+ if (what & POLLHUP)
+ what |= POLLIN | POLLOUT;
+ else if (what & POLLERR)
+ what |= POLLIN | POLLOUT;
+
+ if (what & POLLIN)
+ which |= EV_READ;
+ if (what & POLLOUT)
+ which |= EV_WRITE;
+
+ if (!which)
+ continue;
+
+ /* XXX(niels): not sure if this works for devpoll */
+ evmap_io_active_(base, events[i].fd, which);
+ }
+
+ return (0);
+}
+
+
+static int
+devpoll_add(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct devpollop *devpollop = base->evbase;
+ int res;
+ (void)p;
+
+ /*
+ * It's not necessary to OR the existing read/write events that we
+ * are currently interested in with the new event we are adding.
+ * The /dev/poll driver ORs any new events with the existing events
+ * that it has cached for the fd.
+ */
+
+ res = 0;
+ if (events & EV_READ)
+ res |= POLLIN;
+ if (events & EV_WRITE)
+ res |= POLLOUT;
+
+ if (devpoll_queue(devpollop, fd, res) != 0)
+ return (-1);
+
+ return (0);
+}
+
+static int
+devpoll_del(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct devpollop *devpollop = base->evbase;
+ int res;
+ (void)p;
+
+ res = 0;
+ if (events & EV_READ)
+ res |= POLLIN;
+ if (events & EV_WRITE)
+ res |= POLLOUT;
+
+ /*
+ * The only way to remove an fd from the /dev/poll monitored set is
+ * to use POLLREMOVE by itself. This removes ALL events for the fd
+ * provided so if we care about two events and are only removing one
+ * we must re-add the other event after POLLREMOVE.
+ */
+
+ if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
+ return (-1);
+
+ if ((res & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
+ /*
+ * We're not deleting all events, so we must resubmit the
+ * event that we are still interested in if one exists.
+ */
+
+ if ((res & POLLIN) && (old & EV_WRITE)) {
+ /* Deleting read, still care about write */
+ devpoll_queue(devpollop, fd, POLLOUT);
+ } else if ((res & POLLOUT) && (old & EV_READ)) {
+ /* Deleting write, still care about read */
+ devpoll_queue(devpollop, fd, POLLIN);
+ }
+ }
+
+ return (0);
+}
+
+static void
+devpoll_dealloc(struct event_base *base)
+{
+ struct devpollop *devpollop = base->evbase;
+
+ evsig_dealloc_(base);
+ if (devpollop->events)
+ mm_free(devpollop->events);
+ if (devpollop->changes)
+ mm_free(devpollop->changes);
+ if (devpollop->dpfd >= 0)
+ close(devpollop->dpfd);
+
+ memset(devpollop, 0, sizeof(struct devpollop));
+ mm_free(devpollop);
+}
+
+#endif /* EVENT__HAVE_DEVPOLL */
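A hedged, minimal sketch of the raw /dev/poll interface that devpoll.c wraps (not part of the patch): interest changes are written to the driver as an array of struct pollfd via pwrite() at offset 0, ready events are collected with the DP_POLL ioctl, and POLLREMOVE drops every registered event for an fd, which is why devpoll_del() above re-adds the event it still cares about. The devpoll_demo() helper is an illustrative assumption:

#include <sys/devpoll.h>	/* struct dvpoll, DP_POLL, POLLREMOVE (Solaris) */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Watch one fd for readability on /dev/poll and wait up to one second. */
static int
devpoll_demo(int fd)
{
	struct pollfd change, results[16];
	struct dvpoll dvp;
	int dpfd, n;

	if ((dpfd = open("/dev/poll", O_RDWR)) == -1)
		return -1;

	change.fd = fd;
	change.events = POLLIN;	/* POLLREMOVE here would drop all events for fd */
	change.revents = 0;
	if (pwrite(dpfd, &change, sizeof(change), 0) == -1) {
		close(dpfd);
		return -1;
	}

	dvp.dp_fds = results;
	dvp.dp_nfds = 16;
	dvp.dp_timeout = 1000;	/* milliseconds */
	n = ioctl(dpfd, DP_POLL, &dvp);	/* number of ready fds, or -1 */

	close(dpfd);
	return n;
}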
diff --git a/libs/libevent/docs/epoll.c b/libs/libevent/docs/epoll.c
new file mode 100644
index 0000000000..bf730b23db
--- /dev/null
+++ b/libs/libevent/docs/epoll.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_EPOLL
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/epoll.h>
+#include <signal.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIMERFD_H
+#include <sys/timerfd.h>
+#endif
+
+#include "event-internal.h"
+#include "evsignal-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "changelist-internal.h"
+#include "time-internal.h"
+
+/* Since Linux 2.6.17, epoll is able to report about peer half-closed connection
+ using special EPOLLRDHUP flag on a read event.
+*/
+#if !defined(EPOLLRDHUP)
+#define EPOLLRDHUP 0
+#define EARLY_CLOSE_IF_HAVE_RDHUP 0
+#else
+#define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE
+#endif
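/* Editorial sketch, not part of this file: how a raw epoll caller would
 * request and observe the EPOLLRDHUP flag described above (assumes
 * <sys/epoll.h> and <string.h>). The dispatch loop later in this file maps
 * EPOLLRDHUP onto EV_CLOSED. watch_for_half_close() is an illustrative
 * name, not a libevent function. */
static int
watch_for_half_close(int epfd, int cfd)
{
	struct epoll_event ev;
	memset(&ev, 0, sizeof(ev));
	ev.data.fd = cfd;
	ev.events = EPOLLIN | EPOLLRDHUP;	/* EPOLLRDHUP is 0 where unsupported */
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, cfd, &ev) == -1)
		return -1;
	/* After epoll_wait(), (events & EPOLLRDHUP) on this fd means the peer
	 * has shut down its writing side (a half-closed connection). */
	return 0;
}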
+
+#include "epolltable-internal.h"
+
+#if defined(EVENT__HAVE_SYS_TIMERFD_H) && \
+ defined(EVENT__HAVE_TIMERFD_CREATE) && \
+ defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \
+ defined(TFD_CLOEXEC)
+/* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available
+ and working. This means that we can't support it on 2.6.25 (where timerfd
+ was introduced) or 2.6.26, since 2.6.27 introduced those flags.
+ */
+#define USING_TIMERFD
+#endif
+
+struct epollop {
+ struct epoll_event *events;
+ int nevents;
+ int epfd;
+#ifdef USING_TIMERFD
+ int timerfd;
+#endif
+};
+
+static void *epoll_init(struct event_base *);
+static int epoll_dispatch(struct event_base *, struct timeval *);
+static void epoll_dealloc(struct event_base *);
+
+static const struct eventop epollops_changelist = {
+ "epoll (with changelist)",
+ epoll_init,
+ event_changelist_add_,
+ event_changelist_del_,
+ epoll_dispatch,
+ epoll_dealloc,
+ 1, /* need reinit */
+ EV_FEATURE_ET|EV_FEATURE_O1| EARLY_CLOSE_IF_HAVE_RDHUP,
+ EVENT_CHANGELIST_FDINFO_SIZE
+};
+
+
+static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *p);
+static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *p);
+
+const struct eventop epollops = {
+ "epoll",
+ epoll_init,
+ epoll_nochangelist_add,
+ epoll_nochangelist_del,
+ epoll_dispatch,
+ epoll_dealloc,
+ 1, /* need reinit */
+ EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_EARLY_CLOSE,
+ 0
+};
+
+#define INITIAL_NEVENT 32
+#define MAX_NEVENT 4096
+
+/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
+ * values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be
+ * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
+ * largest number of msec we can support here is 2147482. Let's
+ * round that down by 47 seconds.
+ */
+#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
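/* Worked check of the figure above (editorial note, not part of the patch):
 * with LONG_MAX = 2^31 - 1 = 2147483647 and HZ = 1000,
 * (LONG_MAX - 999) / HZ = 2147482648 / 1000 = 2147482 ms, roughly 35.8
 * minutes; 35*60*1000 = 2100000 ms stays about 47 seconds below that
 * ceiling. */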
+
+static void *
+epoll_init(struct event_base *base)
+{
+ int epfd = -1;
+ struct epollop *epollop;
+
+#ifdef EVENT__HAVE_EPOLL_CREATE1
+ /* First, try the shiny new epoll_create1 interface, if we have it. */
+ epfd = epoll_create1(EPOLL_CLOEXEC);
+#endif
+ if (epfd == -1) {
+ /* Initialize the kernel queue using the old interface. (The
+ size field is ignored since 2.6.8.) */
+ if ((epfd = epoll_create(32000)) == -1) {
+ if (errno != ENOSYS)
+ event_warn("epoll_create");
+ return (NULL);
+ }
+ evutil_make_socket_closeonexec(epfd);
+ }
+
+ if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) {
+ close(epfd);
+ return (NULL);
+ }
+
+ epollop->epfd = epfd;
+
+ /* Initialize fields */
+ epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
+ if (epollop->events == NULL) {
+ mm_free(epollop);
+ close(epfd);
+ return (NULL);
+ }
+ epollop->nevents = INITIAL_NEVENT;
+
+ if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
+ ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
+ evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) {
+
+ base->evsel = &epollops_changelist;
+ }
+
+#ifdef USING_TIMERFD
+ /*
+ The epoll interface ordinarily gives us one-millisecond precision,
+ so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE
+ timer. But when the user has set the new PRECISE_TIMER flag for an
+ event_base, we can try to use timerfd to give them finer granularity.
+ */
+ if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) &&
+ base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) {
+ int fd;
+ fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
+ if (epollop->timerfd >= 0) {
+ struct epoll_event epev;
+ memset(&epev, 0, sizeof(epev));
+ epev.data.fd = epollop->timerfd;
+ epev.events = EPOLLIN;
+ if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) {
+ event_warn("epoll_ctl(timerfd)");
+ close(fd);
+ epollop->timerfd = -1;
+ }
+ } else {
+ if (errno != EINVAL && errno != ENOSYS) {
+ /* These errors probably mean that we were
+ * compiled with timerfd/TFD_* support, but
+ * we're running on a kernel that lacks those.
+ */
+ event_warn("timerfd_create");
+ }
+ epollop->timerfd = -1;
+ }
+ } else {
+ epollop->timerfd = -1;
+ }
+#endif
+
+ evsig_init_(base);
+
+ return (epollop);
+}
+
+static const char *
+change_to_string(int change)
+{
+ change &= (EV_CHANGE_ADD|EV_CHANGE_DEL);
+ if (change == EV_CHANGE_ADD) {
+ return "add";
+ } else if (change == EV_CHANGE_DEL) {
+ return "del";
+ } else if (change == 0) {
+ return "none";
+ } else {
+ return "???";
+ }
+}
+
+static const char *
+epoll_op_to_string(int op)
+{
+ return op == EPOLL_CTL_ADD?"ADD":
+ op == EPOLL_CTL_DEL?"DEL":
+ op == EPOLL_CTL_MOD?"MOD":
+ "???";
+}
+
+#define PRINT_CHANGES(op, events, ch, status) \
+ "Epoll %s(%d) on fd %d " status ". " \
+ "Old events were %d; " \
+ "read change was %d (%s); " \
+ "write change was %d (%s); " \
+ "close change was %d (%s)", \
+ epoll_op_to_string(op), \
+ events, \
+ ch->fd, \
+ ch->old_events, \
+ ch->read_change, \
+ change_to_string(ch->read_change), \
+ ch->write_change, \
+ change_to_string(ch->write_change), \
+ ch->close_change, \
+ change_to_string(ch->close_change)
+
+static int
+epoll_apply_one_change(struct event_base *base,
+ struct epollop *epollop,
+ const struct event_change *ch)
+{
+ struct epoll_event epev;
+ int op, events = 0;
+ int idx;
+
+ idx = EPOLL_OP_TABLE_INDEX(ch);
+ op = epoll_op_table[idx].op;
+ events = epoll_op_table[idx].events;
+
+ if (!events) {
+ EVUTIL_ASSERT(op == 0);
+ return 0;
+ }
+
+ if ((ch->read_change|ch->write_change) & EV_CHANGE_ET)
+ events |= EPOLLET;
+
+ memset(&epev, 0, sizeof(epev));
+ epev.data.fd = ch->fd;
+ epev.events = events;
+ if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) {
+ event_debug((PRINT_CHANGES(op, epev.events, ch, "okay")));
+ return 0;
+ }
+
+ switch (op) {
+ case EPOLL_CTL_MOD:
+ if (errno == ENOENT) {
+ /* If a MOD operation fails with ENOENT, the
+ * fd was probably closed and re-opened. We
+ * should retry the operation as an ADD.
+ */
+ if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) {
+ event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too",
+ (int)epev.events, ch->fd);
+ return -1;
+ } else {
+ event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.",
+ (int)epev.events,
+ ch->fd));
+ return 0;
+ }
+ }
+ break;
+ case EPOLL_CTL_ADD:
+ if (errno == EEXIST) {
+ /* If an ADD operation fails with EEXIST,
+ * either the operation was redundant (as with a
+ * precautionary add), or we ran into a fun
+ * kernel bug where using dup*() to duplicate the
+ * same file into the same fd gives you the same epitem
+ * rather than a fresh one. For the second case,
+ * we must retry with MOD. */
+ if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) {
+ event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too",
+ (int)epev.events, ch->fd);
+ return -1;
+ } else {
+ event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.",
+ (int)epev.events,
+ ch->fd));
+ return 0;
+ }
+ }
+ break;
+ case EPOLL_CTL_DEL:
+ if (errno == ENOENT || errno == EBADF || errno == EPERM) {
+ /* If a delete fails with one of these errors,
+ * that's fine too: we closed the fd before we
+ * got around to calling epoll_dispatch. */
+ event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.",
+ (int)epev.events,
+ ch->fd,
+ strerror(errno)));
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ event_warn(PRINT_CHANGES(op, epev.events, ch, "failed"));
+ return -1;
+}
+
+static int
+epoll_apply_changes(struct event_base *base)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct epollop *epollop = base->evbase;
+ struct event_change *ch;
+
+ int r = 0;
+ int i;
+
+ for (i = 0; i < changelist->n_changes; ++i) {
+ ch = &changelist->changes[i];
+ if (epoll_apply_one_change(base, epollop, ch) < 0)
+ r = -1;
+ }
+
+ return (r);
+}
+
+static int
+epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *p)
+{
+ struct event_change ch;
+ ch.fd = fd;
+ ch.old_events = old;
+ ch.read_change = ch.write_change = ch.close_change = 0;
+ if (events & EV_WRITE)
+ ch.write_change = EV_CHANGE_ADD |
+ (events & EV_ET);
+ if (events & EV_READ)
+ ch.read_change = EV_CHANGE_ADD |
+ (events & EV_ET);
+ if (events & EV_CLOSED)
+ ch.close_change = EV_CHANGE_ADD |
+ (events & EV_ET);
+
+ return epoll_apply_one_change(base, base->evbase, &ch);
+}
+
+static int
+epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *p)
+{
+ struct event_change ch;
+ ch.fd = fd;
+ ch.old_events = old;
+ ch.read_change = ch.write_change = ch.close_change = 0;
+ if (events & EV_WRITE)
+ ch.write_change = EV_CHANGE_DEL;
+ if (events & EV_READ)
+ ch.read_change = EV_CHANGE_DEL;
+ if (events & EV_CLOSED)
+ ch.close_change = EV_CHANGE_DEL;
+
+ return epoll_apply_one_change(base, base->evbase, &ch);
+}
+
+static int
+epoll_dispatch(struct event_base *base, struct timeval *tv)
+{
+ struct epollop *epollop = base->evbase;
+ struct epoll_event *events = epollop->events;
+ int i, res;
+ long timeout = -1;
+
+#ifdef USING_TIMERFD
+ if (epollop->timerfd >= 0) {
+ struct itimerspec is;
+ is.it_interval.tv_sec = 0;
+ is.it_interval.tv_nsec = 0;
+ if (tv == NULL) {
+ /* No timeout; disarm the timer. */
+ is.it_value.tv_sec = 0;
+ is.it_value.tv_nsec = 0;
+ } else {
+ if (tv->tv_sec == 0 && tv->tv_usec == 0) {
+ /* we need to exit immediately; timerfd can't
+ * do that. */
+ timeout = 0;
+ }
+ is.it_value.tv_sec = tv->tv_sec;
+ is.it_value.tv_nsec = tv->tv_usec * 1000;
+ }
+ /* TODO: we could avoid unnecessary syscalls here by only
+ calling timerfd_settime when the top timeout changes, or
+ when we're called with a different timeval.
+ */
+ if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
+ event_warn("timerfd_settime");
+ }
+ } else
+#endif
+ if (tv != NULL) {
+ timeout = evutil_tv_to_msec_(tv);
+ if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
+ /* Linux kernels can wait forever if the timeout is
+ * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
+ timeout = MAX_EPOLL_TIMEOUT_MSEC;
+ }
+ }
+
+ epoll_apply_changes(base);
+ event_changelist_remove_all_(&base->changelist, base);
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("epoll_wait");
+ return (-1);
+ }
+
+ return (0);
+ }
+
+ event_debug(("%s: epoll_wait reports %d", __func__, res));
+ EVUTIL_ASSERT(res <= epollop->nevents);
+
+ for (i = 0; i < res; i++) {
+ int what = events[i].events;
+ short ev = 0;
+#ifdef USING_TIMERFD
+ if (events[i].data.fd == epollop->timerfd)
+ continue;
+#endif
+
+ if (what & (EPOLLHUP|EPOLLERR)) {
+ ev = EV_READ | EV_WRITE;
+ } else {
+ if (what & EPOLLIN)
+ ev |= EV_READ;
+ if (what & EPOLLOUT)
+ ev |= EV_WRITE;
+ if (what & EPOLLRDHUP)
+ ev |= EV_CLOSED;
+ }
+
+ if (!ev)
+ continue;
+
+ evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
+ }
+
+ if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
+ /* We used all of the event space this time. We should
+ be ready for more events next time. */
+ int new_nevents = epollop->nevents * 2;
+ struct epoll_event *new_events;
+
+ new_events = mm_realloc(epollop->events,
+ new_nevents * sizeof(struct epoll_event));
+ if (new_events) {
+ epollop->events = new_events;
+ epollop->nevents = new_nevents;
+ }
+ }
+
+ return (0);
+}
+
+
+static void
+epoll_dealloc(struct event_base *base)
+{
+ struct epollop *epollop = base->evbase;
+
+ evsig_dealloc_(base);
+ if (epollop->events)
+ mm_free(epollop->events);
+ if (epollop->epfd >= 0)
+ close(epollop->epfd);
+#ifdef USING_TIMERFD
+ if (epollop->timerfd >= 0)
+ close(epollop->timerfd);
+#endif
+
+ memset(epollop, 0, sizeof(struct epollop));
+ mm_free(epollop);
+}
+
+#endif /* EVENT__HAVE_EPOLL */
diff --git a/libs/libevent/docs/epoll_sub.c b/libs/libevent/docs/epoll_sub.c
new file mode 100644
index 0000000000..3f01f6a699
--- /dev/null
+++ b/libs/libevent/docs/epoll_sub.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2003-2009 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "evconfig-private.h"
+#include <stdint.h>
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+#include <errno.h>
+
+int
+epoll_create(int size)
+{
+#if !defined(__NR_epoll_create) && defined(__NR_epoll_create1)
+ if (size <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ return (syscall(__NR_epoll_create1, 0));
+#else
+ return (syscall(__NR_epoll_create, size));
+#endif
+}
+
+int
+epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
+{
+
+ return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
+}
+
+int
+epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
+{
+#if !defined(__NR_epoll_wait) && defined(__NR_epoll_pwait)
+ return (syscall(__NR_epoll_pwait, epfd, events, maxevents, timeout, NULL, 0));
+#else
+ return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
+#endif
+}
diff --git a/libs/libevent/docs/evconfig-private.h.cmake b/libs/libevent/docs/evconfig-private.h.cmake
new file mode 100644
index 0000000000..32f04794d2
--- /dev/null
+++ b/libs/libevent/docs/evconfig-private.h.cmake
@@ -0,0 +1,35 @@
+
+#ifndef EVCONFIG_PRIVATE_H_INCLUDED_
+#define EVCONFIG_PRIVATE_H_INCLUDED_
+
+/* Enable extensions on AIX 3, Interix. */
+#cmakedefine _ALL_SOURCE
+
+/* Enable GNU extensions on systems that have them. */
+#cmakedefine _GNU_SOURCE 1
+
+/* Enable threading extensions on Solaris. */
+#cmakedefine _POSIX_PTHREAD_SEMANTICS 1
+
+/* Enable extensions on HP NonStop. */
+#cmakedefine _TANDEM_SOURCE 1
+
+/* Enable general extensions on Solaris. */
+#cmakedefine __EXTENSIONS__
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#cmakedefine _FILE_OFFSET_BITS 1
+/* Define for large files, on AIX-style hosts. */
+#cmakedefine _LARGE_FILES 1
+
+/* Define to 1 if on MINIX. */
+#cmakedefine _MINIX 1
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+#cmakedefine _POSIX_1_SOURCE 1
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#cmakedefine _POSIX_SOURCE 1
+
+#endif
diff --git a/libs/libevent/docs/evconfig-private.h.in b/libs/libevent/docs/evconfig-private.h.in
new file mode 100644
index 0000000000..7b3dfdb10e
--- /dev/null
+++ b/libs/libevent/docs/evconfig-private.h.in
@@ -0,0 +1,48 @@
+/* evconfig-private.h template - see "Configuration Header Templates" */
+/* in AC manual. Kevin Bowling <kevin.bowling@kev009.com> */
+#ifndef EVCONFIG_PRIVATE_H_INCLUDED_
+#define EVCONFIG_PRIVATE_H_INCLUDED_
+
+/* Enable extensions on AIX 3, Interix. */
+#ifndef _ALL_SOURCE
+# undef _ALL_SOURCE
+#endif
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+# undef _GNU_SOURCE
+#endif
+/* Enable threading extensions on Solaris. */
+#ifndef _POSIX_PTHREAD_SEMANTICS
+# undef _POSIX_PTHREAD_SEMANTICS
+#endif
+/* Enable extensions on HP NonStop. */
+#ifndef _TANDEM_SOURCE
+# undef _TANDEM_SOURCE
+#endif
+/* Enable general extensions on Solaris. */
+#ifndef __EXTENSIONS__
+# undef __EXTENSIONS__
+#endif
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#undef _FILE_OFFSET_BITS
+/* Define for large files, on AIX-style hosts. */
+#undef _LARGE_FILES
+
+/* Define to 1 if on MINIX. */
+#ifndef _MINIX
+#undef _MINIX
+#endif
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+#ifndef _POSIX_1_SOURCE
+#undef _POSIX_1_SOURCE
+#endif
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#ifndef _POSIX_SOURCE
+#undef _POSIX_SOURCE
+#endif
+
+#endif
diff --git a/libs/libevent/docs/evdns.3 b/libs/libevent/docs/evdns.3
new file mode 100644
index 0000000000..10414fa2ef
--- /dev/null
+++ b/libs/libevent/docs/evdns.3
@@ -0,0 +1,322 @@
+.\"
+.\" Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\" derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd October 7, 2006
+.Dt EVDNS 3
+.Os
+.Sh NAME
+.Nm evdns_init
+.Nm evdns_shutdown
+.Nm evdns_err_to_string
+.Nm evdns_nameserver_add
+.Nm evdns_count_nameservers
+.Nm evdns_clear_nameservers_and_suspend
+.Nm evdns_resume
+.Nm evdns_nameserver_ip_add
+.Nm evdns_resolve_ipv4
+.Nm evdns_resolve_reverse
+.Nm evdns_resolv_conf_parse
+.Nm evdns_config_windows_nameservers
+.Nm evdns_search_clear
+.Nm evdns_search_add
+.Nm evdns_search_ndots_set
+.Nm evdns_set_log_fn
+.Nd asynchronous functions for DNS resolution.
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Fd #include <evdns.h>
+.Ft int
+.Fn evdns_init
+.Ft void
+.Fn evdns_shutdown "int fail_requests"
+.Ft "const char *"
+.Fn evdns_err_to_string "int err"
+.Ft int
+.Fn evdns_nameserver_add "unsigned long int address"
+.Ft int
+.Fn evdns_count_nameservers
+.Ft int
+.Fn evdns_clear_nameservers_and_suspend
+.Ft int
+.Fn evdns_resume
+.Ft int
+.Fn evdns_nameserver_ip_add "const char *ip_as_string"
+.Ft int
+.Fn evdns_resolve_ipv4 "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolve_reverse "struct in_addr *in" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolv_conf_parse "int flags" "const char *"
+.Ft void
+.Fn evdns_search_clear
+.Ft void
+.Fn evdns_search_add "const char *domain"
+.Ft void
+.Fn evdns_search_ndots_set "const int ndots"
+.Ft void
+.Fn evdns_set_log_fn "evdns_debug_log_fn_type fn"
+.Ft int
+.Fn evdns_config_windows_nameservers
+.Sh DESCRIPTION
+Welcome, gentle reader.
+.Pp
+Async DNS lookups are really a whole lot harder than they should be,
+mostly stemming from the fact that the libc resolver has never been
+very good at them. Before you use this library you should see if libc
+can do the job for you with the modern async call getaddrinfo_a
+(see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+please continue.
+.Pp
+This code is based on libevent and you must call event_init before
+any of the APIs in this file. You must also seed the OpenSSL random
+source if you are using OpenSSL for ids (see below).
+.Pp
+This library is designed to be included and shipped with your source
+code. You statically link with it. You should also test for the
+existence of strtok_r and define HAVE_STRTOK_R if you have it.
+.Pp
+The DNS protocol requires a good source of id numbers and these
+numbers should be unpredictable for spoofing reasons. There are
+three methods for generating them here and you must define exactly
+one of them. In increasing order of preference:
+.Pp
+.Bl -tag -width "DNS_USE_GETTIMEOFDAY_FOR_ID" -compact -offset indent
+.It DNS_USE_GETTIMEOFDAY_FOR_ID
+Using the bottom 16 bits of the usec result from gettimeofday. This
+is a pretty poor solution but should work anywhere.
+.It DNS_USE_CPU_CLOCK_FOR_ID
+Using the bottom 16 bits of the nsec result from the CPU's time
+counter. This is better, but may not work everywhere. Requires
+POSIX realtime support and you'll need to link against -lrt on
+glibc systems at least.
+.It DNS_USE_OPENSSL_FOR_ID
+Uses the OpenSSL RAND_bytes call to generate the data. You must
+have seeded the pool before making any calls to this library.
+.El
+.Pp
+The library keeps track of the state of nameservers and will avoid
+them when they go down. Otherwise it will round robin between them.
+.Pp
+Quick start guide:
+.Bd -literal -offset indent
+#include "evdns.h"
+void callback(int result, char type, int count, int ttl,
+    void *addresses, void *arg);
+evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+evdns_resolve("www.hostname.com", 0, callback, NULL);
+.Ed
+.Pp
+When the lookup is complete the callback function is called. The
+first argument will be one of the DNS_ERR_* defines in evdns.h.
+Hopefully it will be DNS_ERR_NONE, in which case type will be
+DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+for which the data can be cached (in seconds), addresses will point
+to an array of uint32_t's and arg will be whatever you passed to
+evdns_resolve.
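+.Pp
+For illustration only, a minimal callback that prints any returned IPv4
+addresses might look like the following sketch (how the addresses are
+consumed is entirely up to the application):
+.Bd -literal -offset indent
+#include <stdio.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+
+static void
+callback(int result, char type, int count, int ttl,
+    void *addresses, void *arg)
+{
+    struct in_addr addr;
+    int i;
+
+    if (result != DNS_ERR_NONE || type != DNS_IPv4_A)
+        return;
+    for (i = 0; i < count; ++i) {
+        /* addresses points to an array of uint32_t's */
+        addr.s_addr = ((uint32_t *)addresses)[i];
+        puts(inet_ntoa(addr));
+    }
+}
+.Ed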
+.Pp
+Searching:
+.Pp
+In order for this library to be a good replacement for glibc's resolver it
+supports searching. This involves setting a list of default domains in
+which names will be looked up. The number of dots in the query name
+determines the order in which this list is used.
+.Pp
+Searching appears to be a single lookup from the point of view of the API,
+although many DNS queries may be generated from a single call to
+evdns_resolve. Searching can also drastically slow down the resolution
+of names.
+.Pp
+To disable searching:
+.Bl -enum -compact -offset indent
+.It
+Never set it up. If you never call
+.Fn evdns_resolv_conf_parse ,
+.Fn evdns_init ,
+or
+.Fn evdns_search_add
+then no searching will occur.
+.It
+If you do call
+.Fn evdns_resolv_conf_parse
+then don't pass
+.Va DNS_OPTION_SEARCH
+(or
+.Va DNS_OPTIONS_ALL ,
+which implies it).
+.It
+When calling
+.Fn evdns_resolve ,
+pass the
+.Va DNS_QUERY_NO_SEARCH
+flag.
+.El
+.Pp
+The order of searches depends on the number of dots in the name. If the
+number is greater than the ndots setting then the name is first tried
+globally. Otherwise each search domain is appended in turn.
+.Pp
+The ndots setting can either be set from a resolv.conf, or by calling
+evdns_search_ndots_set.
+.Pp
+For example, with ndots set to 1 (the default) and a search domain list of
+["myhome.net"]:
+ Query: www
+ Order: www.myhome.net, www.
+.Pp
+ Query: www.abc
+ Order: www.abc., www.abc.myhome.net
+.Pp
+.Sh API reference
+.Pp
+.Bl -tag -width 0123456
+.It Ft int Fn evdns_init
+Initializes support for non-blocking name resolution by calling
+.Fn evdns_resolv_conf_parse
+on UNIX and
+.Fn evdns_config_windows_nameservers
+on Windows.
+.It Ft int Fn evdns_nameserver_add "unsigned long int address"
+Add a nameserver. The address should be an IP address in
+network byte order. The type of address is chosen so that
+it matches in_addr.s_addr.
+Returns non-zero on error.
+.It Ft int Fn evdns_nameserver_ip_add "const char *ip_as_string"
+This wraps the above function by parsing a string as an IP
+address and adds it as a nameserver.
+Returns non-zero on error
+.It Ft int Fn evdns_resolve "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+Resolve a name. The name parameter should be a DNS name.
+The flags parameter should be 0, or DNS_QUERY_NO_SEARCH
+which disables searching for this query. (see defn of
+searching above).
+.Pp
+The callback argument is a function which is called when
+this query completes and ptr is an argument which is passed
+to that callback function.
+.Pp
+Returns non-zero on error
+.It Ft void Fn evdns_search_clear
+Clears the list of search domains
+.It Ft void Fn evdns_search_add "const char *domain"
+Add a domain to the list of search domains
+.It Ft void Fn evdns_search_ndots_set "int ndots"
+Set the number of dots which, when found in a name, causes
+the first query to be without any search domain.
+.It Ft int Fn evdns_count_nameservers "void"
+Return the number of configured nameservers (not necessarily the
+number of running nameservers). This is useful for double-checking
+whether our calls to the various nameserver configuration functions
+have been successful.
+.It Ft int Fn evdns_clear_nameservers_and_suspend "void"
+Remove all currently configured nameservers, and suspend all pending
+resolves. Resolves will not necessarily be re-attempted until
+evdns_resume() is called.
+.It Ft int Fn evdns_resume "void"
+Re-attempt resolves left in limbo after an earlier call to
+evdns_clear_nameservers_and_suspend().
+.It Ft int Fn evdns_config_windows_nameservers "void"
+Attempt to configure a set of nameservers based on platform settings on
+a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
+looks in the registry. Returns 0 on success, nonzero on failure.
+.It Ft int Fn evdns_resolv_conf_parse "int flags" "const char *filename"
+Parse a resolv.conf like file from the given filename.
+.Pp
+See the man page for resolv.conf for the format of this file.
+The flags argument determines what information is parsed from
+this file:
+.Bl -tag -width "DNS_OPTION_NAMESERVERS" -offset indent -compact -nested
+.It DNS_OPTION_SEARCH
+domain, search and ndots options
+.It DNS_OPTION_NAMESERVERS
+nameserver lines
+.It DNS_OPTION_MISC
+timeout and attempts options
+.It DNS_OPTIONS_ALL
+all of the above
+.El
+.Pp
+The following directives are not parsed from the file:
+ sortlist, rotate, no-check-names, inet6, debug
+.Pp
+Returns non-zero on error:
+.Bl -tag -width "0" -offset indent -compact -nested
+.It 0
+no errors
+.It 1
+failed to open file
+.It 2
+failed to stat file
+.It 3
+file too large
+.It 4
+out of memory
+.It 5
+short read from file
+.El
+.El
+.Sh Internals:
+Requests are kept in two queues. The first is the inflight queue. In
+this queue requests have an allocated transaction id and nameserver.
+They will soon be transmitted if they haven't already been.
+.Pp
+The second is the waiting queue. The size of the inflight ring is
+limited and all other requests wait in the waiting queue for space. This
+bounds the number of concurrent requests so that we don't flood the
+nameserver. Several algorithms require a full walk of the inflight
+queue and so bounding its size keeps things going nicely under huge
+(many thousands of requests) loads.
+.Pp
+If a nameserver loses too many requests it is considered down and we
+try not to use it. After a while we send a probe to that nameserver
+(a lookup for google.com) and, if it replies, we consider it working
+again. If the nameserver fails a probe we wait longer to try again
+with the next probe.
+.Sh SEE ALSO
+.Xr event 3 ,
+.Xr gethostbyname 3 ,
+.Xr resolv.conf 5
+.Sh HISTORY
+The
+.Nm evdns
+API was developed by Adam Langley on top of the
+.Nm libevent
+API.
+The code was integrated into
+.Nm Tor
+by Nick Mathewson and finally put into
+.Nm libevent
+itself by Niels Provos.
+.Sh AUTHORS
+The
+.Nm evdns
+API and code was written by Adam Langley with significant
+contributions by Nick Mathewson.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/libs/libevent/docs/event-config.h.cmake b/libs/libevent/docs/event-config.h.cmake
new file mode 100644
index 0000000000..76e0d1f981
--- /dev/null
+++ b/libs/libevent/docs/event-config.h.cmake
@@ -0,0 +1,534 @@
+/* event-config.h
+ *
+ * This file was generated by cmake when the makefiles were generated.
+ *
+ * DO NOT EDIT THIS FILE.
+ *
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef EVENT2_EVENT_CONFIG_H_INCLUDED_
+#define EVENT2_EVENT_CONFIG_H_INCLUDED_
+
+/* Numeric representation of the version */
+#define EVENT__NUMERIC_VERSION @EVENT_NUMERIC_VERSION@
+#define EVENT__PACKAGE_VERSION "@EVENT_PACKAGE_VERSION@"
+
+#define EVENT__VERSION_MAJOR @EVENT_VERSION_MAJOR@
+#define EVENT__VERSION_MINOR @EVENT_VERSION_MINOR@
+#define EVENT__VERSION_PATCH @EVENT_VERSION_PATCH@
+
+/* Version number of package */
+#define EVENT__VERSION "@EVENT_VERSION@"
+
+/* Name of package */
+#define EVENT__PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define EVENT__PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define EVENT__PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define EVENT__PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define EVENT__PACKAGE_TARNAME ""
+
+/* Define if libevent should build without support for a debug mode */
+#cmakedefine EVENT__DISABLE_DEBUG_MODE
+
+/* Define if libevent should not allow replacing the mm functions */
+#cmakedefine EVENT__DISABLE_MM_REPLACEMENT
+
+/* Define if libevent should not be compiled with thread support */
+#cmakedefine EVENT__DISABLE_THREAD_SUPPORT
+
+/* Define to 1 if you have the `accept4' function. */
+#cmakedefine EVENT__HAVE_ACCEPT4
+
+/* Define to 1 if you have the `arc4random' function. */
+#cmakedefine EVENT__HAVE_ARC4RANDOM
+
+/* Define to 1 if you have the `arc4random_buf' function. */
+#cmakedefine EVENT__HAVE_ARC4RANDOM_BUF
+
+/* Define if clock_gettime is available in libc */
+#cmakedefine EVENT__DNS_USE_CPU_CLOCK_FOR_ID
+
+/* Define if no secure id variant is available */
+#cmakedefine EVENT__DNS_USE_GETTIMEOFDAY_FOR_ID
+#cmakedefine EVENT__DNS_USE_FTIME_FOR_ID
+
+/* Define to 1 if you have the <arpa/inet.h> header file. */
+#cmakedefine EVENT__HAVE_ARPA_INET_H
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#cmakedefine EVENT__HAVE_CLOCK_GETTIME
+
+/* Define to 1 if you have the declaration of `CTL_KERN'. */
+#cmakedefine EVENT__HAVE_DECL_CTL_KERN
+
+/* Define to 1 if you have the declaration of `KERN_ARND'. */
+#cmakedefine EVENT__HAVE_DECL_KERN_ARND
+
+/* Define to 1 if you have the declaration of `KERN_RANDOM'. */
+#cmakedefine EVENT__HAVE_DECL_KERN_RANDOM
+
+/* Define if /dev/poll is available */
+#cmakedefine EVENT__HAVE_DEVPOLL
+
+/* Define to 1 if you have the <netdb.h> header file. */
+#cmakedefine EVENT__HAVE_NETDB_H
+
+/* Define to 1 if fd_mask type is defined */
+#cmakedefine EVENT__HAVE_FD_MASK
+
+/* Define to 1 if the <sys/queue.h> header file defines TAILQ_FOREACH. */
+#cmakedefine EVENT__HAVE_TAILQFOREACH
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#cmakedefine EVENT__HAVE_DLFCN_H
+
+/* Define if your system supports the epoll system calls */
+#cmakedefine EVENT__HAVE_EPOLL
+
+/* Define to 1 if you have the `epoll_create1' function. */
+#cmakedefine EVENT__HAVE_EPOLL_CREATE1
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+#cmakedefine EVENT__HAVE_EPOLL_CTL
+
+/* Define to 1 if you have the `eventfd' function. */
+#cmakedefine EVENT__HAVE_EVENTFD
+
+/* Define if your system supports event ports */
+#cmakedefine EVENT__HAVE_EVENT_PORTS
+
+/* Define to 1 if you have the `fcntl' function. */
+#cmakedefine EVENT__HAVE_FCNTL
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#cmakedefine EVENT__HAVE_FCNTL_H
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#cmakedefine EVENT__HAVE_GETADDRINFO
+
+/* Define to 1 if you have the `getegid' function. */
+#cmakedefine EVENT__HAVE_GETEGID
+
+/* Define to 1 if you have the `geteuid' function. */
+#cmakedefine EVENT__HAVE_GETEUID
+
+/* TODO: Check for different gethostbyname_r argument counts. CheckPrototypeDefinition.cmake can be used. */
+/* Define this if you have any gethostbyname_r() */
+#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R
+
+/* Define this if gethostbyname_r takes 3 arguments */
+#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_3_ARG
+
+/* Define this if gethostbyname_r takes 5 arguments */
+#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_5_ARG
+
+/* Define this if gethostbyname_r takes 6 arguments */
+#cmakedefine EVENT__HAVE_GETHOSTBYNAME_R_6_ARG
+
+/* Define to 1 if you have the `getifaddrs' function. */
+#cmakedefine EVENT__HAVE_GETIFADDRS
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#cmakedefine EVENT__HAVE_GETNAMEINFO
+
+/* Define to 1 if you have the `getprotobynumber' function. */
+#cmakedefine EVENT__HAVE_GETPROTOBYNUMBER
+
+/* Define to 1 if you have the `getservbyname' function. */
+#cmakedefine EVENT__HAVE_GETSERVBYNAME
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#cmakedefine EVENT__HAVE_GETTIMEOFDAY
+
+/* Define to 1 if you have the <ifaddrs.h> header file. */
+#cmakedefine EVENT__HAVE_IFADDRS_H
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#cmakedefine EVENT__HAVE_INET_NTOP
+
+/* Define to 1 if you have the `inet_pton' function. */
+#cmakedefine EVENT__HAVE_INET_PTON
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#cmakedefine EVENT__HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `issetugid' function. */
+#cmakedefine EVENT__HAVE_ISSETUGID
+
+/* Define to 1 if you have the `kqueue' function. */
+#cmakedefine EVENT__HAVE_KQUEUE
+
+/* Define if the system has zlib */
+#cmakedefine EVENT__HAVE_LIBZ
+
+/* Define to 1 if you have the `mach_absolute_time' function. */
+#cmakedefine EVENT__HAVE_MACH_ABSOLUTE_TIME
+
+/* Define to 1 if you have the <mach/mach_time.h> header file. */
+#cmakedefine EVENT__HAVE_MACH_MACH_TIME_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#cmakedefine EVENT__HAVE_MEMORY_H
+
+/* Define to 1 if you have the `mmap' function. */
+#cmakedefine EVENT__HAVE_MMAP
+
+/* Define to 1 if you have the `nanosleep' function. */
+#cmakedefine EVENT__HAVE_NANOSLEEP
+
+/* Define to 1 if you have the `usleep' function. */
+#cmakedefine EVENT__HAVE_USLEEP
+
+/* Define to 1 if you have the <netdb.h> header file. */
+#cmakedefine EVENT__HAVE_NETDB_H
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+#cmakedefine EVENT__HAVE_NETINET_IN6_H
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+#cmakedefine EVENT__HAVE_NETINET_IN_H
+
+/* Define to 1 if you have the <netinet/tcp.h> header file. */
+#cmakedefine EVENT__HAVE_NETINET_TCP_H
+
+/* Define if the system has openssl */
+#cmakedefine EVENT__HAVE_OPENSSL
+
+/* Define if the system has zlib */
+#cmakedefine EVENT__HAVE_ZLIB
+
+/* Define to 1 if you have the `pipe' function. */
+#cmakedefine EVENT__HAVE_PIPE
+
+/* Define to 1 if you have the `pipe2' function. */
+#cmakedefine EVENT__HAVE_PIPE2
+
+/* Define to 1 if you have the `poll' function. */
+#cmakedefine EVENT__HAVE_POLL
+
+/* Define to 1 if you have the <poll.h> header file. */
+#cmakedefine EVENT__HAVE_POLL_H
+
+/* Define to 1 if you have the `port_create' function. */
+#cmakedefine EVENT__HAVE_PORT_CREATE
+
+/* Define to 1 if you have the <port.h> header file. */
+#cmakedefine EVENT__HAVE_PORT_H
+
+/* Define if you have POSIX threads libraries and header files. */
+#cmakedefine EVENT__HAVE_PTHREAD
+
+/* Define if we have pthreads on this system */
+#cmakedefine EVENT__HAVE_PTHREADS
+
+/* Define to 1 if you have the `putenv' function. */
+#cmakedefine EVENT__HAVE_PUTENV
+
+/* Define to 1 if the system has the type `sa_family_t'. */
+#cmakedefine EVENT__HAVE_SA_FAMILY_T
+
+/* Define to 1 if you have the `select' function. */
+#cmakedefine EVENT__HAVE_SELECT
+
+/* Define to 1 if you have the `setenv' function. */
+#cmakedefine EVENT__HAVE_SETENV
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#cmakedefine EVENT__HAVE_SETFD
+
+/* Define to 1 if you have the `setrlimit' function. */
+#cmakedefine EVENT__HAVE_SETRLIMIT
+
+/* Define to 1 if you have the `sendfile' function. */
+#cmakedefine EVENT__HAVE_SENDFILE
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#cmakedefine EVENT__HAVE_SETFD
+
+/* Define to 1 if you have the `sigaction' function. */
+#cmakedefine EVENT__HAVE_SIGACTION
+
+/* Define to 1 if you have the `signal' function. */
+#cmakedefine EVENT__HAVE_SIGNAL
+
+/* Define to 1 if you have the `splice' function. */
+#cmakedefine EVENT__HAVE_SPLICE
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#cmakedefine EVENT__HAVE_STDARG_H
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#cmakedefine EVENT__HAVE_STDDEF_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#cmakedefine EVENT__HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#cmakedefine EVENT__HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#cmakedefine EVENT__HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#cmakedefine EVENT__HAVE_STRING_H
+
+/* Define to 1 if you have the `strlcpy' function. */
+#cmakedefine EVENT__HAVE_STRLCPY
+
+/* Define to 1 if you have the `strsep' function. */
+#cmakedefine EVENT__HAVE_STRSEP
+
+/* Define to 1 if you have the `strtok_r' function. */
+#cmakedefine EVENT__HAVE_STRTOK_R
+
+/* Define to 1 if you have the `strtoll' function. */
+#cmakedefine EVENT__HAVE_STRTOLL
+
+/* Define to 1 if the system has the type `struct addrinfo'. */
+#cmakedefine EVENT__HAVE_STRUCT_ADDRINFO
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR
+
+/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
+#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16
+
+/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
+#cmakedefine EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32
+
+/* Define to 1 if the system has the type `struct sockaddr_in6'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN6
+
+/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
+
+/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+
+/* Define to 1 if the system has the type `struct sockaddr_storage'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE
+
+/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY
+
+/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */
+#cmakedefine EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY
+
+/* Define to 1 if you have the `sysctl' function. */
+#cmakedefine EVENT__HAVE_SYSCTL
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_DEVPOLL_H
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_EPOLL_H
+
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_EVENTFD_H
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_EVENT_H
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_IOCTL_H
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_MMAN_H
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_PARAM_H
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_QUEUE_H
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_RESOURCE_H
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_SELECT_H
+
+/* Define to 1 if you have the <sys/sendfile.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_SENDFILE_H
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_SOCKET_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/sysctl.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_SYSCTL_H
+
+/* Define to 1 if you have the <sys/timerfd.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_TIMERFD_H
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_TIME_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <sys/uio.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_UIO_H
+
+/* Define to 1 if you have the <sys/wait.h> header file. */
+#cmakedefine EVENT__HAVE_SYS_WAIT_H
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#cmakedefine EVENT__HAVE_TAILQFOREACH
+
+/* Define if timeradd is defined in <sys/time.h> */
+#cmakedefine EVENT__HAVE_TIMERADD
+
+/* Define if timerclear is defined in <sys/time.h> */
+#cmakedefine EVENT__HAVE_TIMERCLEAR
+
+/* Define if timercmp is defined in <sys/time.h> */
+#cmakedefine EVENT__HAVE_TIMERCMP
+
+/* Define to 1 if you have the `timerfd_create' function. */
+#cmakedefine EVENT__HAVE_TIMERFD_CREATE
+
+/* Define if timerisset is defined in <sys/time.h> */
+#cmakedefine EVENT__HAVE_TIMERISSET
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#cmakedefine EVENT__HAVE_UINT8_T
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#cmakedefine EVENT__HAVE_UINT16_T
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#cmakedefine EVENT__HAVE_UINT32_T
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#cmakedefine EVENT__HAVE_UINT64_T
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#cmakedefine EVENT__HAVE_UINTPTR_T
+
+/* Define to 1 if you have the `umask' function. */
+#cmakedefine EVENT__HAVE_UMASK
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#cmakedefine EVENT__HAVE_UNISTD_H
+
+/* Define to 1 if you have the `unsetenv' function. */
+#cmakedefine EVENT__HAVE_UNSETENV
+
+/* Define to 1 if you have the `vasprintf' function. */
+#cmakedefine EVENT__HAVE_VASPRINTF
+
+/* Define if kqueue works correctly with pipes */
+#cmakedefine EVENT__HAVE_WORKING_KQUEUE
+
+#ifdef __USE_UNUSED_DEFINITIONS__
+/* Define to necessary symbol if this constant uses a non-standard name on your system. */
+/* XXX: Hello, this isn't even used, nor is it defined anywhere... - Ellzey */
+#define EVENT__PTHREAD_CREATE_JOINABLE ${EVENT__PTHREAD_CREATE_JOINABLE}
+#endif
+
+/* The size of `pthread_t', as computed by sizeof. */
+#define EVENT__SIZEOF_PTHREAD_T @EVENT__SIZEOF_PTHREAD_T@
+
+/* The size of an `int', as computed by sizeof. */
+#define EVENT__SIZEOF_INT @EVENT__SIZEOF_INT@
+
+/* The size of a `long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG @EVENT__SIZEOF_LONG@
+
+/* The size of a `long long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG_LONG @EVENT__SIZEOF_LONG_LONG@
+
+/* The size of `off_t', as computed by sizeof. */
+#define EVENT__SIZEOF_OFF_T @EVENT__SIZEOF_OFF_T@
+
+#define EVENT__SIZEOF_SSIZE_T @EVENT__SIZEOF_SSIZE_T@
+
+
+/* The size of a `short', as computed by sizeof. */
+#define EVENT__SIZEOF_SHORT @EVENT__SIZEOF_SHORT@
+
+/* The size of `size_t', as computed by sizeof. */
+#define EVENT__SIZEOF_SIZE_T @EVENT__SIZEOF_SIZE_T@
+
+/* Define to 1 if you have the ANSI C header files. */
+#cmakedefine EVENT__STDC_HEADERS
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#cmakedefine EVENT__TIME_WITH_SYS_TIME
+
+/* The size of `socklen_t', as computed by sizeof. */
+#define EVENT__SIZEOF_SOCKLEN_T @EVENT__SIZEOF_SOCKLEN_T@
+
+/* The size of `void *', as computed by sizeof. */
+#define EVENT__SIZEOF_VOID_P @EVENT__SIZEOF_VOID_P@
+
+/* Set an alias for whatever __func__ or __FUNCTION__ is, what silliness */
+#if defined (__func__)
+#define EVENT____func__ __func__
+#elif defined(__FUNCTION__)
+#define EVENT____func__ __FUNCTION__
+#else
+#define EVENT____func__ __FILE__
+#endif
+
+
+#ifdef __THESE_ARE_NOT_CONFIG_H_THINGS_THEY_ARE_DASH_D_THINGS__
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* Ellzey is not satisfied */
+#define EVENT___FILE_OFFSET_BITS @EVENT___FILE_OFFSET_BITS@
+
+/* Define for large files, on AIX-style hosts. */
+#define @_LARGE_FILES@
+#endif
+
+#ifdef _WhAT_DOES_THIS_EVEN_DO_
+/* Define to empty if `const' does not conform to ANSI C. */
+/* lolwut? - ellzey */
+#undef EVENT__const
+#endif
+
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef __cplusplus
+/* why not c++?
+ *
+ * and are we really expected to use EVENT__inline everywhere,
+ * shouldn't we just do:
+ * ifdef EVENT__inline
+ * define inline EVENT__inline
+ *
+ * - Ellzey
+ */
+
+#define EVENT__inline @EVENT__inline@
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__pid_t @EVENT__pid_t@
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+#define EVENT__size_t @EVENT__size_t@
+
+/* Define to unsigned int if you don't have it */
+#define EVENT__socklen_t @EVENT__socklen_t@
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__ssize_t @EVENT__ssize_t@
+
+#cmakedefine EVENT__NEED_DLLIMPORT
+
+/* Define to 1 if you have ERR_remove_thread_stat(). */
+#cmakedefine EVENT__HAVE_ERR_REMOVE_THREAD_STATE
+
+#endif
diff --git a/libs/libevent/docs/event.3 b/libs/libevent/docs/event.3
new file mode 100644
index 0000000000..655a823efe
--- /dev/null
+++ b/libs/libevent/docs/event.3
@@ -0,0 +1,624 @@
+.\" $OpenBSD: event.3,v 1.4 2002/07/12 18:50:48 provos Exp $
+.\"
+.\" Copyright (c) 2000 Artur Grabowski <art@openbsd.org>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\" derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd August 8, 2000
+.Dt EVENT 3
+.Os
+.Sh NAME
+.Nm event_init ,
+.Nm event_dispatch ,
+.Nm event_loop ,
+.Nm event_loopexit ,
+.Nm event_loopbreak ,
+.Nm event_set ,
+.Nm event_base_dispatch ,
+.Nm event_base_loop ,
+.Nm event_base_loopexit ,
+.Nm event_base_loopbreak ,
+.Nm event_base_set ,
+.Nm event_base_free ,
+.Nm event_add ,
+.Nm event_del ,
+.Nm event_once ,
+.Nm event_base_once ,
+.Nm event_pending ,
+.Nm event_initialized ,
+.Nm event_priority_init ,
+.Nm event_priority_set ,
+.Nm evtimer_set ,
+.Nm evtimer_add ,
+.Nm evtimer_del ,
+.Nm evtimer_pending ,
+.Nm evtimer_initialized ,
+.Nm signal_set ,
+.Nm signal_add ,
+.Nm signal_del ,
+.Nm signal_pending ,
+.Nm signal_initialized ,
+.Nm bufferevent_new ,
+.Nm bufferevent_free ,
+.Nm bufferevent_write ,
+.Nm bufferevent_write_buffer ,
+.Nm bufferevent_read ,
+.Nm bufferevent_enable ,
+.Nm bufferevent_disable ,
+.Nm bufferevent_settimeout ,
+.Nm bufferevent_base_set ,
+.Nm evbuffer_new ,
+.Nm evbuffer_free ,
+.Nm evbuffer_add ,
+.Nm evbuffer_add_buffer ,
+.Nm evbuffer_add_printf ,
+.Nm evbuffer_add_vprintf ,
+.Nm evbuffer_drain ,
+.Nm evbuffer_write ,
+.Nm evbuffer_read ,
+.Nm evbuffer_find ,
+.Nm evbuffer_readline ,
+.Nm evhttp_new ,
+.Nm evhttp_bind_socket ,
+.Nm evhttp_free
+.Nd execute a function when a specific event occurs
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Ft "struct event_base *"
+.Fn "event_init" "void"
+.Ft int
+.Fn "event_dispatch" "void"
+.Ft int
+.Fn "event_loop" "int flags"
+.Ft int
+.Fn "event_loopexit" "struct timeval *tv"
+.Ft int
+.Fn "event_loopbreak" "void"
+.Ft void
+.Fn "event_set" "struct event *ev" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg"
+.Ft int
+.Fn "event_base_dispatch" "struct event_base *base"
+.Ft int
+.Fn "event_base_loop" "struct event_base *base" "int flags"
+.Ft int
+.Fn "event_base_loopexit" "struct event_base *base" "struct timeval *tv"
+.Ft int
+.Fn "event_base_loopbreak" "struct event_base *base"
+.Ft int
+.Fn "event_base_set" "struct event_base *base" "struct event *"
+.Ft void
+.Fn "event_base_free" "struct event_base *base"
+.Ft int
+.Fn "event_add" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "event_del" "struct event *ev"
+.Ft int
+.Fn "event_once" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_base_once" "struct event_base *base" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_pending" "struct event *ev" "short event" "struct timeval *tv"
+.Ft int
+.Fn "event_initialized" "struct event *ev"
+.Ft int
+.Fn "event_priority_init" "int npriorities"
+.Ft int
+.Fn "event_priority_set" "struct event *ev" "int priority"
+.Ft void
+.Fn "evtimer_set" "struct event *ev" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "evtimer_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "evtimer_del" "struct event *ev"
+.Ft int
+.Fn "evtimer_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "evtimer_initialized" "struct event *ev"
+.Ft void
+.Fn "signal_set" "struct event *ev" "int signal" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "signal_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "signal_del" "struct event *ev"
+.Ft int
+.Fn "signal_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "signal_initialized" "struct event *ev"
+.Ft "struct bufferevent *"
+.Fn "bufferevent_new" "int fd" "evbuffercb readcb" "evbuffercb writecb" "everrorcb" "void *cbarg"
+.Ft void
+.Fn "bufferevent_free" "struct bufferevent *bufev"
+.Ft int
+.Fn "bufferevent_write" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_write_buffer" "struct bufferevent *bufev" "struct evbuffer *buf"
+.Ft size_t
+.Fn "bufferevent_read" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_enable" "struct bufferevent *bufev" "short event"
+.Ft int
+.Fn "bufferevent_disable" "struct bufferevent *bufev" "short event"
+.Ft void
+.Fn "bufferevent_settimeout" "struct bufferevent *bufev" "int timeout_read" "int timeout_write"
+.Ft int
+.Fn "bufferevent_base_set" "struct event_base *base" "struct bufferevent *bufev"
+.Ft "struct evbuffer *"
+.Fn "evbuffer_new" "void"
+.Ft void
+.Fn "evbuffer_free" "struct evbuffer *buf"
+.Ft int
+.Fn "evbuffer_add" "struct evbuffer *buf" "const void *data" "size_t size"
+.Ft int
+.Fn "evbuffer_add_buffer" "struct evbuffer *dst" "struct evbuffer *src"
+.Ft int
+.Fn "evbuffer_add_printf" "struct evbuffer *buf" "const char *fmt" "..."
+.Ft int
+.Fn "evbuffer_add_vprintf" "struct evbuffer *buf" "const char *fmt" "va_list ap"
+.Ft void
+.Fn "evbuffer_drain" "struct evbuffer *buf" "size_t size"
+.Ft int
+.Fn "evbuffer_write" "struct evbuffer *buf" "int fd"
+.Ft int
+.Fn "evbuffer_read" "struct evbuffer *buf" "int fd" "int size"
+.Ft "unsigned char *"
+.Fn "evbuffer_find" "struct evbuffer *buf" "const unsigned char *data" "size_t size"
+.Ft "char *"
+.Fn "evbuffer_readline" "struct evbuffer *buf"
+.Ft "struct evhttp *"
+.Fn "evhttp_new" "struct event_base *base"
+.Ft int
+.Fn "evhttp_bind_socket" "struct evhttp *http" "const char *address" "unsigned short port"
+.Ft "void"
+.Fn "evhttp_free" "struct evhttp *http"
+.Ft int
+.Fa (*event_sigcb)(void) ;
+.Ft volatile sig_atomic_t
+.Fa event_gotsig ;
+.Sh DESCRIPTION
+The
+.Nm event
+API provides a mechanism to execute a function when a specific event
+on a file descriptor occurs or after a given time has passed.
+.Pp
+The
+.Nm event
+API needs to be initialized with
+.Fn event_init
+before it can be used.
+.Pp
+In order to process events, an application needs to call
+.Fn event_dispatch .
+This function only returns on error, and should replace the event core
+of the application program.
+.Pp
+The function
+.Fn event_set
+prepares the event structure
+.Fa ev
+to be used in future calls to
+.Fn event_add
+and
+.Fn event_del .
+The event will be prepared to call the function specified by the
+.Fa fn
+argument with an
+.Fa int
+argument indicating the file descriptor, a
+.Fa short
+argument indicating the type of event, and a
+.Fa void *
+argument given in the
+.Fa arg
+argument.
+The
+.Fa fd
+indicates the file descriptor that should be monitored for events.
+The events can be either
+.Va EV_READ ,
+.Va EV_WRITE ,
+or both,
+indicating that an application can read from or write to the file
+descriptor, respectively, without blocking.
+.Pp
+The function
+.Fa fn
+will be called with the file descriptor that triggered the event and
+the type of event which will be either
+.Va EV_TIMEOUT ,
+.Va EV_SIGNAL ,
+.Va EV_READ ,
+or
+.Va EV_WRITE .
+Additionally, an event which has registered interest in more than one of the
+preceding events, via bitwise-OR to
+.Fn event_set ,
+can provide its callback function with a bitwise-OR of more than one triggered
+event.
+The additional flag
+.Va EV_PERSIST
+makes an
+.Fn event_add
+persistent until
+.Fn event_del
+has been called.
+.Pp
+Once initialized, the
+.Fa ev
+structure can be used repeatedly with
+.Fn event_add
+and
+.Fn event_del
+and does not need to be reinitialized unless the function called and/or
+the argument to it are to be changed.
+However, when an
+.Fa ev
+structure has been added to libevent using
+.Fn event_add
+the structure must persist until the event occurs (assuming
+.Fa EV_PERSIST
+is not set) or is removed
+using
+.Fn event_del .
+You may not reuse the same
+.Fa ev
+structure for multiple monitored descriptors; each descriptor
+needs its own
+.Fa ev .
+.Pp
+The function
+.Fn event_add
+schedules the execution of the
+.Fa ev
+event when the event specified in
+.Fn event_set
+occurs, or after at least the time specified in
+.Fa tv
+has elapsed.
+If
+.Fa tv
+is
+.Dv NULL ,
+no timeout occurs and the function will only be called
+if a matching event occurs on the file descriptor.
+The event in the
+.Fa ev
+argument must be already initialized by
+.Fn event_set
+and may not be used in calls to
+.Fn event_set
+until it has timed out or been removed with
+.Fn event_del .
+If the event in the
+.Fa ev
+argument already has a scheduled timeout, the old timeout will be
+replaced by the new one.
+.Pp
+The function
+.Fn event_del
+will cancel the event in the argument
+.Fa ev .
+If the event has already executed or has never been added
+the call will have no effect.
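+.Pp
+As an illustrative sketch (not a complete program), assuming a readable
+descriptor
+.Fa fd
+and a callback
+.Fa read_cb
+defined elsewhere, a persistent read event is typically set up as
+follows:
+.Bd -literal -offset indent
+struct event ev;
+
+event_init();
+event_set(&ev, fd, EV_READ | EV_PERSIST, read_cb, NULL);
+event_add(&ev, NULL);    /* no timeout */
+event_dispatch();
+.Ed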
+.Pp
+The functions
+.Fn evtimer_set ,
+.Fn evtimer_add ,
+.Fn evtimer_del ,
+.Fn evtimer_initialized ,
+and
+.Fn evtimer_pending
+are abbreviations for common situations where only a timeout is required.
+The file descriptor passed will be \-1, and the event type will be
+.Va EV_TIMEOUT .
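+.Pp
+For example, a five second one-shot timeout could be scheduled as in the
+following sketch (the callback
+.Fa timeout_cb
+is assumed to be defined elsewhere):
+.Bd -literal -offset indent
+struct event timer_ev;
+struct timeval tv = { 5, 0 };
+
+evtimer_set(&timer_ev, timeout_cb, NULL);
+evtimer_add(&timer_ev, &tv);
+.Ed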
+.Pp
+The functions
+.Fn signal_set ,
+.Fn signal_add ,
+.Fn signal_del ,
+.Fn signal_initialized ,
+and
+.Fn signal_pending
+are abbreviations.
+The event type will be a persistent
+.Va EV_SIGNAL .
+That means
+.Fn signal_set
+adds
+.Va EV_PERSIST .
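+.Pp
+A sketch of catching
+.Va SIGINT
+(the callback
+.Fa sigint_cb
+is assumed to be defined elsewhere):
+.Bd -literal -offset indent
+struct event sig_ev;
+
+signal_set(&sig_ev, SIGINT, sigint_cb, NULL);
+signal_add(&sig_ev, NULL);
+.Ed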
+.Pp
+In order to avoid races in signal handlers, the
+.Nm event
+API provides two variables:
+.Va event_sigcb
+and
+.Va event_gotsig .
+A signal handler
+sets
+.Va event_gotsig
+to indicate that a signal has been received.
+The application sets
+.Va event_sigcb
+to a callback function.
+After the signal handler sets
+.Va event_gotsig ,
+.Nm event_dispatch
+will execute the callback function to process received signals.
+The callback returns 1 when no events are registered any more.
+It can return \-1 to indicate an error to the
+.Nm event
+library, causing
+.Fn event_dispatch
+to terminate with
+.Va errno
+set to
+.Er EINTR .
+.Pp
+The function
+.Fn event_once
+is similar to
+.Fn event_set .
+However, it schedules a callback to be called exactly once and does not
+require the caller to prepare an
+.Fa event
+structure.
+This function supports
+.Fa EV_TIMEOUT ,
+.Fa EV_READ ,
+and
+.Fa EV_WRITE .
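+.Pp
+For example, the following sketch waits at most ten seconds for
+.Fa fd
+to become readable and then calls
+.Fa once_cb
+exactly once (both names are placeholders):
+.Bd -literal -offset indent
+struct timeval tv = { 10, 0 };
+
+event_once(fd, EV_READ, once_cb, NULL, &tv);
+.Ed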
+.Pp
+The
+.Fn event_pending
+function can be used to check if the event specified by
+.Fa event
+is pending to run.
+If
+.Va EV_TIMEOUT
+was specified and
+.Fa tv
+is not
+.Dv NULL ,
+the expiration time of the event will be returned in
+.Fa tv .
+.Pp
+The
+.Fn event_initialized
+macro can be used to check if an event has been initialized.
+.Pp
+The
+.Nm event_loop
+function provides an interface for single pass execution of pending
+events.
+The flags
+.Va EVLOOP_ONCE
+and
+.Va EVLOOP_NONBLOCK
+are recognized.
+The
+.Nm event_loopexit
+function exits from the event loop. The next
+.Fn event_loop
+iteration after the
+given timer expires will complete normally (handling all queued events) then
+exit without blocking for events again. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+The
+.Nm event_loopbreak
+function exits from the event loop immediately.
+.Fn event_loop
+will abort after the next event is completed;
+.Fn event_loopbreak
+is typically invoked from this event's callback. This behavior is analogous
+to the "break;" statement. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+.Pp
+It is the responsibility of the caller to provide these functions with
+pre-allocated event structures.
+.Pp
+.Sh EVENT PRIORITIES
+By default
+.Nm libevent
+schedules all active events with the same priority.
+However, sometimes it is desirable to process some events with a higher
+priority than others.
+For that reason,
+.Nm libevent
+supports strict priority queues.
+Active events with a lower priority are always processed before events
+with a higher priority.
+.Pp
+The number of different priorities can be set initially with the
+.Fn event_priority_init
+function.
+This function should be called before the first call to
+.Fn event_dispatch .
+The
+.Fn event_priority_set
+function can be used to assign a priority to an event.
+By default,
+.Nm libevent
+assigns the middle priority to all events unless their priority
+is explicitly set.
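+.Pp
+A sketch of using three priority levels and making one event the most
+urgent (with
+.Fa fd
+and
+.Fa read_cb
+as in the earlier sketch; remember that events with lower priority
+numbers are processed first):
+.Bd -literal -offset indent
+struct event ev;
+
+event_init();
+event_priority_init(3);
+event_set(&ev, fd, EV_READ, read_cb, NULL);
+event_priority_set(&ev, 0);    /* processed before priorities 1 and 2 */
+event_add(&ev, NULL);
+.Ed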
+.Sh THREAD SAFE EVENTS
+.Nm Libevent
+has experimental support for thread-safe events.
+When initializing the library via
+.Fn event_init ,
+an event base is returned.
+This event base can be used in conjunction with calls to
+.Fn event_base_set ,
+.Fn event_base_dispatch ,
+.Fn event_base_loop ,
+.Fn event_base_loopexit ,
+.Fn bufferevent_base_set
+and
+.Fn event_base_free .
+.Fn event_base_set
+should be called after preparing an event with
+.Fn event_set ,
+as
+.Fn event_set
+assigns the provided event to the most recently created event base.
+.Fn bufferevent_base_set
+should be called after preparing a bufferevent with
+.Fn bufferevent_new .
+.Fn event_base_free
+should be used to free memory associated with the event base
+when it is no longer needed.
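+.Pp
+A sketch of the per-base variants (again assuming
+.Fa fd
+and
+.Fa read_cb
+from the earlier sketches):
+.Bd -literal -offset indent
+struct event_base *base;
+struct event ev;
+
+base = event_init();
+event_set(&ev, fd, EV_READ | EV_PERSIST, read_cb, NULL);
+event_base_set(base, &ev);
+event_add(&ev, NULL);
+event_base_dispatch(base);
+event_base_free(base);
+.Ed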
+.Sh BUFFERED EVENTS
+.Nm libevent
+provides an abstraction on top of the regular event callbacks.
+This abstraction is called a
+.Va "buffered event" .
+A buffered event provides input and output buffers that get filled
+and drained automatically.
+The user of a buffered event no longer deals directly with the I/O,
+but instead reads from the input buffer and writes to the output buffer.
+.Pp
+A new bufferevent is created by
+.Fn bufferevent_new .
+The parameter
+.Fa fd
+specifies the file descriptor that data is read from and written to.
+This file descriptor is not allowed to be a
+.Xr pipe 2 .
+The next three parameters are callbacks.
+The read and write callbacks have the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "void *arg" .
+The error callback has the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "short what" "void *arg" .
+The argument is specified by the fourth parameter
+.Fa "cbarg" .
+A
+.Fa bufferevent struct
+pointer is returned on success, NULL on error.
+Both the read and the write callback may be NULL.
+The error callback must always be provided.
+.Pp
+Once initialized, the bufferevent structure can be used repeatedly with
+bufferevent_enable() and bufferevent_disable().
+The flags parameter can be a combination of
+.Va EV_READ
+and
+.Va EV_WRITE .
+When reading is enabled, the bufferevent will try to read from the file
+descriptor and call the read callback.
+The write callback is executed
+whenever the output buffer is drained below the write low watermark,
+which is
+.Va 0
+by default.
+.Pp
+The
+.Fn bufferevent_write
+function can be used to write data to the file descriptor.
+The data is appended to the output buffer and written to the descriptor
+automatically as it becomes available for writing.
+.Fn bufferevent_write
+returns 0 on success or \-1 on failure.
+The
+.Fn bufferevent_read
+function is used to read data from the input buffer,
+returning the amount of data read.
+.Pp
+If multiple bases are in use, bufferevent_base_set() must be called before
+enabling the bufferevent for the first time.
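+.Pp
+A sketch of reading through a bufferevent; the callback and function
+names are placeholders, and error handling is omitted:
+.Bd -literal -offset indent
+#include <event.h>
+
+static void
+readcb(struct bufferevent *bufev, void *arg)
+{
+    char buf[4096];
+    size_t n = bufferevent_read(bufev, buf, sizeof(buf));
+    /* consume the n bytes now sitting in buf */
+}
+
+static void
+errorcb(struct bufferevent *bufev, short what, void *arg)
+{
+    bufferevent_free(bufev);
+}
+
+static void
+setup(int fd)
+{
+    struct bufferevent *bev;
+
+    bev = bufferevent_new(fd, readcb, NULL, errorcb, NULL);
+    bufferevent_enable(bev, EV_READ);
+}
+.Ed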
+.Sh NON-BLOCKING HTTP SUPPORT
+.Nm libevent
+provides a very thin HTTP layer that can be used both to host an HTTP
+server and also to make HTTP requests.
+An HTTP server can be created by calling
+.Fn evhttp_new .
+It can be bound to any port and address with the
+.Fn evhttp_bind_socket
+function.
+When the HTTP server is no longer used, it can be freed via
+.Fn evhttp_free .
+.Pp
+To be notified of HTTP requests, a user needs to register callbacks with the
+HTTP server.
+This can be done by calling
+.Fn evhttp_set_cb .
+The second argument is the URI for which a callback is being registered.
+The corresponding callback will receive an
+.Va struct evhttp_request
+object that contains all information about the request.
+.Pp
+This section does not document all the possible function calls; please
+check
+.Va event.h
+for the public interfaces.
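+.Pp
+A minimal HTTP server sketch; the URI
+.Pa /hello
+and port 8080 are arbitrary examples, and the reply helpers used below
+are declared in
+.Va evhttp.h :
+.Bd -literal -offset indent
+#include <event.h>
+#include <evhttp.h>
+
+static void
+hello_cb(struct evhttp_request *req, void *arg)
+{
+    struct evbuffer *buf = evbuffer_new();
+
+    evbuffer_add_printf(buf, "Hello, world");
+    evhttp_send_reply(req, HTTP_OK, "OK", buf);
+    evbuffer_free(buf);
+}
+
+int
+main(void)
+{
+    struct event_base *base = event_init();
+    struct evhttp *http = evhttp_new(base);
+
+    evhttp_bind_socket(http, "0.0.0.0", 8080);
+    evhttp_set_cb(http, "/hello", hello_cb, NULL);
+    event_base_dispatch(base);
+    evhttp_free(http);
+    return 0;
+}
+.Ed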
+.Sh ADDITIONAL NOTES
+It is possible to disable support for
+.Va epoll , kqueue , devpoll , poll
+or
+.Va select
+by setting the environment variable
+.Va EVENT_NOEPOLL , EVENT_NOKQUEUE , EVENT_NODEVPOLL , EVENT_NOPOLL
+or
+.Va EVENT_NOSELECT ,
+respectively.
+By setting the environment variable
+.Va EVENT_SHOW_METHOD ,
+.Nm libevent
+displays the kernel notification method that it uses.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn event_add
+and
+.Fn event_del
+return 0.
+Otherwise, \-1 is returned and the global variable errno is
+set to indicate the error.
+.Sh SEE ALSO
+.Xr kqueue 2 ,
+.Xr poll 2 ,
+.Xr select 2 ,
+.Xr evdns 3 ,
+.Xr timeout 9
+.Sh HISTORY
+The
+.Nm event
+API manpage is based on the
+.Xr timeout 9
+manpage by Artur Grabowski.
+The port of
+.Nm libevent
+to Windows is due to Michael A. Davis.
+Support for real-time signals is due to Taral.
+.Sh AUTHORS
+The
+.Nm event
+library was written by Niels Provos.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/libs/libevent/docs/event_rpcgen.py b/libs/libevent/docs/event_rpcgen.py
new file mode 100644
index 0000000000..c537565696
--- /dev/null
+++ b/libs/libevent/docs/event_rpcgen.py
@@ -0,0 +1,1728 @@
+#!/usr/bin/env python2
+#
+# Copyright (c) 2005-2007 Niels Provos <provos@citi.umich.edu>
+# Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+# All rights reserved.
+#
+# Generates marshaling code based on libevent.
+
+# TODO:
+# 1) use optparse to allow the strategy shell to parse options, and
+# to allow the instantiated factory (for the specific output language)
+# to parse remaining options
+# 2) move the globals into a class that manages execution (including the
+#    progress outputs that spam stderr at the moment)
+# 3) emit other languages
+
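+# Illustrative example (names here are hypothetical): given an input file
+# example.rpc containing
+#
+#     struct person {
+#         string name = 1;
+#         optional int age = 2;
+#         bytes pubkey[16] = 3;
+#     }
+#
+# running "event_rpcgen.py example.rpc" writes example.gen.h and
+# example.gen.c with the corresponding marshal/unmarshal helpers.
+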
+import sys
+import re
+
+_NAME = "event_rpcgen.py"
+_VERSION = "0.1"
+
+# Globals
+line_count = 0
+
+white = re.compile(r'\s+')
+cppcomment = re.compile(r'\/\/.*$')
+nonident = re.compile(r'[^a-zA-Z0-9_]')
+structref = re.compile(r'^struct\[([a-zA-Z_][a-zA-Z0-9_]*)\]$')
+structdef = re.compile(r'^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$')
+
+headerdirect = []
+cppdirect = []
+
+QUIETLY = 0
+
+def declare(s):
+ if not QUIETLY:
+ print s
+
+def TranslateList(mylist, mydict):
+ return map(lambda x: x % mydict, mylist)
+
+# Exception class for parse errors
+class RpcGenError(Exception):
+ def __init__(self, why):
+ self.why = why
+ def __str__(self):
+ return str(self.why)
+
+# Holds everything that makes a struct
+class Struct:
+ def __init__(self, name):
+ self._name = name
+ self._entries = []
+ self._tags = {}
+ declare(' Created struct: %s' % name)
+
+ def AddEntry(self, entry):
+ if self._tags.has_key(entry.Tag()):
+ raise RpcGenError(
+ 'Entry "%s" duplicates tag number %d from "%s" '
+ 'around line %d' % (entry.Name(), entry.Tag(),
+ self._tags[entry.Tag()], line_count))
+ self._entries.append(entry)
+ self._tags[entry.Tag()] = entry.Name()
+ declare(' Added entry: %s' % entry.Name())
+
+ def Name(self):
+ return self._name
+
+ def EntryTagName(self, entry):
+ """Creates the name inside an enumeration for distinguishing data
+ types."""
+ name = "%s_%s" % (self._name, entry.Name())
+ return name.upper()
+
+ def PrintIndented(self, file, ident, code):
+ """Takes an array, add indentation to each entry and prints it."""
+ for entry in code:
+ print >>file, '%s%s' % (ident, entry)
+
+class StructCCode(Struct):
+ """ Knows how to generate C code for a struct """
+
+ def __init__(self, name):
+ Struct.__init__(self, name)
+
+ def PrintTags(self, file):
+ """Prints the tag definitions for a structure."""
+ print >>file, '/* Tag definition for %s */' % self._name
+ print >>file, 'enum %s_ {' % self._name.lower()
+ for entry in self._entries:
+ print >>file, ' %s=%d,' % (self.EntryTagName(entry),
+ entry.Tag())
+ print >>file, ' %s_MAX_TAGS' % (self._name.upper())
+ print >>file, '};\n'
+
+ def PrintForwardDeclaration(self, file):
+ print >>file, 'struct %s;' % self._name
+
+ def PrintDeclaration(self, file):
+ print >>file, '/* Structure declaration for %s */' % self._name
+ print >>file, 'struct %s_access_ {' % self._name
+ for entry in self._entries:
+ dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
+ dcl.extend(
+ entry.GetDeclaration('(*%s_get)' % entry.Name()))
+ if entry.Array():
+ dcl.extend(
+ entry.AddDeclaration('(*%s_add)' % entry.Name()))
+ self.PrintIndented(file, ' ', dcl)
+ print >>file, '};\n'
+
+ print >>file, 'struct %s {' % self._name
+ print >>file, ' struct %s_access_ *base;\n' % self._name
+ for entry in self._entries:
+ dcl = entry.Declaration()
+ self.PrintIndented(file, ' ', dcl)
+ print >>file, ''
+ for entry in self._entries:
+ print >>file, ' ev_uint8_t %s_set;' % entry.Name()
+ print >>file, '};\n'
+
+ print >>file, \
+"""struct %(name)s *%(name)s_new(void);
+struct %(name)s *%(name)s_new_with_arg(void *);
+void %(name)s_free(struct %(name)s *);
+void %(name)s_clear(struct %(name)s *);
+void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
+int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
+int %(name)s_complete(struct %(name)s *);
+void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
+ const struct %(name)s *);
+int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
+ struct %(name)s *);""" % { 'name' : self._name }
+
+
+ # Write a setting function of every variable
+ for entry in self._entries:
+ self.PrintIndented(file, '', entry.AssignDeclaration(
+ entry.AssignFuncName()))
+ self.PrintIndented(file, '', entry.GetDeclaration(
+ entry.GetFuncName()))
+ if entry.Array():
+ self.PrintIndented(file, '', entry.AddDeclaration(
+ entry.AddFuncName()))
+
+ print >>file, '/* --- %s done --- */\n' % self._name
+
+ def PrintCode(self, file):
+ print >>file, ('/*\n'
+ ' * Implementation of %s\n'
+ ' */\n') % self._name
+
+ print >>file, \
+ 'static struct %(name)s_access_ %(name)s_base__ = {' % \
+ { 'name' : self._name }
+ for entry in self._entries:
+ self.PrintIndented(file, ' ', entry.CodeBase())
+ print >>file, '};\n'
+
+ # Creation
+ print >>file, (
+ 'struct %(name)s *\n'
+ '%(name)s_new(void)\n'
+ '{\n'
+ ' return %(name)s_new_with_arg(NULL);\n'
+ '}\n'
+ '\n'
+ 'struct %(name)s *\n'
+ '%(name)s_new_with_arg(void *unused)\n'
+ '{\n'
+ ' struct %(name)s *tmp;\n'
+ ' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
+ ' event_warn("%%s: malloc", __func__);\n'
+ ' return (NULL);\n'
+ ' }\n'
+ ' tmp->base = &%(name)s_base__;\n') % { 'name' : self._name }
+
+ for entry in self._entries:
+ self.PrintIndented(file, ' ', entry.CodeInitialize('tmp'))
+ print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
+
+ print >>file, (
+ ' return (tmp);\n'
+ '}\n')
+
+ # Adding
+ for entry in self._entries:
+ if entry.Array():
+ self.PrintIndented(file, '', entry.CodeAdd())
+ print >>file, ''
+
+ # Assigning
+ for entry in self._entries:
+ self.PrintIndented(file, '', entry.CodeAssign())
+ print >>file, ''
+
+ # Getting
+ for entry in self._entries:
+ self.PrintIndented(file, '', entry.CodeGet())
+ print >>file, ''
+
+ # Clearing
+ print >>file, ( 'void\n'
+ '%(name)s_clear(struct %(name)s *tmp)\n'
+ '{'
+ ) % { 'name' : self._name }
+ for entry in self._entries:
+ self.PrintIndented(file, ' ', entry.CodeClear('tmp'))
+
+ print >>file, '}\n'
+
+ # Freeing
+ print >>file, ( 'void\n'
+ '%(name)s_free(struct %(name)s *tmp)\n'
+ '{'
+ ) % { 'name' : self._name }
+
+ for entry in self._entries:
+ self.PrintIndented(file, ' ', entry.CodeFree('tmp'))
+
+ print >>file, (' free(tmp);\n'
+ '}\n')
+
+ # Marshaling
+ print >>file, ('void\n'
+ '%(name)s_marshal(struct evbuffer *evbuf, '
+ 'const struct %(name)s *tmp)'
+ '{') % { 'name' : self._name }
+ for entry in self._entries:
+ indent = ' '
+ # Optional entries do not have to be set
+ if entry.Optional():
+ indent += ' '
+ print >>file, ' if (tmp->%s_set) {' % entry.Name()
+ self.PrintIndented(
+ file, indent,
+ entry.CodeMarshal('evbuf', self.EntryTagName(entry),
+ entry.GetVarName('tmp'),
+ entry.GetVarLen('tmp')))
+ if entry.Optional():
+ print >>file, ' }'
+
+ print >>file, '}\n'
+
+ # Unmarshaling
+ print >>file, ('int\n'
+ '%(name)s_unmarshal(struct %(name)s *tmp, '
+ ' struct evbuffer *evbuf)\n'
+ '{\n'
+ ' ev_uint32_t tag;\n'
+ ' while (evbuffer_get_length(evbuf) > 0) {\n'
+ ' if (evtag_peek(evbuf, &tag) == -1)\n'
+ ' return (-1);\n'
+ ' switch (tag) {\n'
+ ) % { 'name' : self._name }
+ for entry in self._entries:
+ print >>file, ' case %s:\n' % self.EntryTagName(entry)
+ if not entry.Array():
+ print >>file, (
+ ' if (tmp->%s_set)\n'
+ ' return (-1);'
+ ) % (entry.Name())
+
+ self.PrintIndented(
+ file, ' ',
+ entry.CodeUnmarshal('evbuf',
+ self.EntryTagName(entry),
+ entry.GetVarName('tmp'),
+ entry.GetVarLen('tmp')))
+
+ print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
+ ' break;\n' )
+ print >>file, ( ' default:\n'
+ ' return -1;\n'
+ ' }\n'
+ ' }\n' )
+ # Check if it was decoded completely
+ print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
+ ' return (-1);'
+ ) % { 'name' : self._name }
+
+ # Successfully decoded
+ print >>file, ( ' return (0);\n'
+ '}\n')
+
+ # Checking if a structure has all the required data
+ print >>file, (
+ 'int\n'
+ '%(name)s_complete(struct %(name)s *msg)\n'
+ '{' ) % { 'name' : self._name }
+ for entry in self._entries:
+ if not entry.Optional():
+ code = [
+ 'if (!msg->%(name)s_set)',
+ ' return (-1);' ]
+ code = TranslateList(code, entry.GetTranslation())
+ self.PrintIndented(
+ file, ' ', code)
+
+ self.PrintIndented(
+ file, ' ',
+ entry.CodeComplete('msg', entry.GetVarName('msg')))
+ print >>file, (
+ ' return (0);\n'
+ '}\n' )
+
+ # Complete message unmarshaling
+ print >>file, (
+ 'int\n'
+ 'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
+ 'ev_uint32_t need_tag, struct %(name)s *msg)\n'
+ '{\n'
+ ' ev_uint32_t tag;\n'
+ ' int res = -1;\n'
+ '\n'
+ ' struct evbuffer *tmp = evbuffer_new();\n'
+ '\n'
+ ' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
+ ' || tag != need_tag)\n'
+ ' goto error;\n'
+ '\n'
+ ' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
+ ' goto error;\n'
+ '\n'
+ ' res = 0;\n'
+ '\n'
+ ' error:\n'
+ ' evbuffer_free(tmp);\n'
+ ' return (res);\n'
+ '}\n' ) % { 'name' : self._name }
+
+ # Complete message marshaling
+ print >>file, (
+ 'void\n'
+ 'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, '
+ 'const struct %(name)s *msg)\n'
+ '{\n'
+ ' struct evbuffer *buf_ = evbuffer_new();\n'
+ ' assert(buf_ != NULL);\n'
+ ' %(name)s_marshal(buf_, msg);\n'
+ ' evtag_marshal_buffer(evbuf, tag, buf_);\n '
+ ' evbuffer_free(buf_);\n'
+ '}\n' ) % { 'name' : self._name }
+
+class Entry:
+ def __init__(self, type, name, tag):
+ self._type = type
+ self._name = name
+ self._tag = int(tag)
+ self._ctype = type
+ self._optional = 0
+ self._can_be_array = 0
+ self._array = 0
+ self._line_count = -1
+ self._struct = None
+ self._refname = None
+
+ self._optpointer = True
+ self._optaddarg = True
+
+ def GetInitializer(self):
+ assert 0, "Entry does not provide initializer"
+
+ def SetStruct(self, struct):
+ self._struct = struct
+
+ def LineCount(self):
+ assert self._line_count != -1
+ return self._line_count
+
+ def SetLineCount(self, number):
+ self._line_count = number
+
+ def Array(self):
+ return self._array
+
+ def Optional(self):
+ return self._optional
+
+ def Tag(self):
+ return self._tag
+
+ def Name(self):
+ return self._name
+
+ def Type(self):
+ return self._type
+
+ def MakeArray(self, yes=1):
+ self._array = yes
+
+ def MakeOptional(self):
+ self._optional = 1
+
+ def Verify(self):
+ if self.Array() and not self._can_be_array:
+ raise RpcGenError(
+ 'Entry "%s" cannot be created as an array '
+ 'around line %d' % (self._name, self.LineCount()))
+ if not self._struct:
+ raise RpcGenError(
+ 'Entry "%s" does not know which struct it belongs to '
+ 'around line %d' % (self._name, self.LineCount()))
+ if self._optional and self._array:
+ raise RpcGenError(
+ 'Entry "%s" has illegal combination of optional and array '
+ 'around line %d' % (self._name, self.LineCount()))
+
+ def GetTranslation(self, extradict = {}):
+ mapping = {
+ "parent_name" : self._struct.Name(),
+ "name" : self._name,
+ "ctype" : self._ctype,
+ "refname" : self._refname,
+ "optpointer" : self._optpointer and "*" or "",
+ "optreference" : self._optpointer and "&" or "",
+ "optaddarg" :
+ self._optaddarg and ", const %s value" % self._ctype or ""
+ }
+ for (k, v) in extradict.items():
+ mapping[k] = v
+
+ return mapping
+
+ def GetVarName(self, var):
+ return '%(var)s->%(name)s_data' % self.GetTranslation({ 'var' : var })
+
+ def GetVarLen(self, var):
+ return 'sizeof(%s)' % self._ctype
+
+ def GetFuncName(self):
+ return '%s_%s_get' % (self._struct.Name(), self._name)
+
+ def GetDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, %s *);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def CodeGet(self):
+ code = (
+ 'int',
+ '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
+ '%(ctype)s *value)',
+ '{',
+ ' if (msg->%(name)s_set != 1)',
+ ' return (-1);',
+ ' *value = msg->%(name)s_data;',
+ ' return (0);',
+ '}' )
+ code = '\n'.join(code)
+ code = code % self.GetTranslation()
+ return code.split('\n')
+
+ def AssignFuncName(self):
+ return '%s_%s_assign' % (self._struct.Name(), self._name)
+
+ def AddFuncName(self):
+ return '%s_%s_add' % (self._struct.Name(), self._name)
+
+ def AssignDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, const %s);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def CodeAssign(self):
+ code = [ 'int',
+ '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
+ ' const %(ctype)s value)',
+ '{',
+ ' msg->%(name)s_set = 1;',
+ ' msg->%(name)s_data = value;',
+ ' return (0);',
+ '}' ]
+ code = '\n'.join(code)
+ code = code % self.GetTranslation()
+ return code.split('\n')
+
+ def CodeClear(self, structname):
+ code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
+
+ return code
+
+ def CodeComplete(self, structname, var_name):
+ return []
+
+ def CodeFree(self, name):
+ return []
+
+ def CodeBase(self):
+ code = [
+ '%(parent_name)s_%(name)s_assign,',
+ '%(parent_name)s_%(name)s_get,'
+ ]
+ if self.Array():
+ code.append('%(parent_name)s_%(name)s_add,')
+
+ code = '\n'.join(code)
+ code = code % self.GetTranslation()
+ return code.split('\n')
+
+class EntryBytes(Entry):
+ def __init__(self, type, name, tag, length):
+ # Init base class
+ Entry.__init__(self, type, name, tag)
+
+ self._length = length
+ self._ctype = 'ev_uint8_t'
+
+ def GetInitializer(self):
+ return "NULL"
+
+ def GetVarLen(self, var):
+ return '(%s)' % self._length
+
+ def CodeArrayAdd(self, varname, value):
+ # XXX: copy here
+ return [ '%(varname)s = NULL;' % { 'varname' : varname } ]
+
+ def GetDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, %s **);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def AssignDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, const %s *);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def Declaration(self):
+ dcl = ['ev_uint8_t %s_data[%s];' % (self._name, self._length)]
+
+ return dcl
+
+ def CodeGet(self):
+ name = self._name
+ code = [ 'int',
+ '%s_%s_get(struct %s *msg, %s **value)' % (
+ self._struct.Name(), name,
+ self._struct.Name(), self._ctype),
+ '{',
+ ' if (msg->%s_set != 1)' % name,
+ ' return (-1);',
+ ' *value = msg->%s_data;' % name,
+ ' return (0);',
+ '}' ]
+ return code
+
+ def CodeAssign(self):
+ name = self._name
+ code = [ 'int',
+ '%s_%s_assign(struct %s *msg, const %s *value)' % (
+ self._struct.Name(), name,
+ self._struct.Name(), self._ctype),
+ '{',
+ ' msg->%s_set = 1;' % name,
+ ' memcpy(msg->%s_data, value, %s);' % (
+ name, self._length),
+ ' return (0);',
+ '}' ]
+ return code
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ code = [ 'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, '
+ '%(var)s, %(varlen)s) == -1) {',
+ ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
+ ' return (-1);',
+ '}'
+ ]
+ return TranslateList(code,
+ self.GetTranslation({
+ 'var' : var_name,
+ 'varlen' : var_len,
+ 'buf' : buf,
+ 'tag' : tag_name }))
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = ['evtag_marshal(%s, %s, %s, %s);' % (
+ buf, tag_name, var_name, var_len)]
+ return code
+
+ def CodeClear(self, structname):
+ code = [ '%s->%s_set = 0;' % (structname, self.Name()),
+ 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+ structname, self._name, structname, self._name)]
+
+ return code
+
+ def CodeInitialize(self, name):
+ code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+ name, self._name, name, self._name)]
+ return code
+
+ def Verify(self):
+ if not self._length:
+ raise RpcGenError(
+ 'Entry "%s" needs a length '
+ 'around line %d' % (self._name, self.LineCount()))
+
+ Entry.Verify(self)
+
+class EntryInt(Entry):
+ def __init__(self, type, name, tag, bits=32):
+ # Init base class
+ Entry.__init__(self, type, name, tag)
+
+ self._can_be_array = 1
+ if bits == 32:
+ self._ctype = 'ev_uint32_t'
+ self._marshal_type = 'int'
+ if bits == 64:
+ self._ctype = 'ev_uint64_t'
+ self._marshal_type = 'int64'
+
+ def GetInitializer(self):
+ return "0"
+
+ def CodeArrayFree(self, var):
+ return []
+
+ def CodeArrayAssign(self, varname, srcvar):
+ return [ '%(varname)s = %(srcvar)s;' % { 'varname' : varname,
+ 'srcvar' : srcvar } ]
+
+ def CodeArrayAdd(self, varname, value):
+ """Returns a new entry of this type."""
+ return [ '%(varname)s = %(value)s;' % { 'varname' : varname,
+ 'value' : value } ]
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ code = [
+ 'if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {',
+ ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
+ ' return (-1);',
+ '}' ]
+ code = '\n'.join(code) % self.GetTranslation({
+ 'ma' : self._marshal_type,
+ 'buf' : buf,
+ 'tag' : tag_name,
+ 'var' : var_name })
+ return code.split('\n')
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = [
+ 'evtag_marshal_%s(%s, %s, %s);' % (
+ self._marshal_type, buf, tag_name, var_name)]
+ return code
+
+ def Declaration(self):
+ dcl = ['%s %s_data;' % (self._ctype, self._name)]
+
+ return dcl
+
+ def CodeInitialize(self, name):
+ code = ['%s->%s_data = 0;' % (name, self._name)]
+ return code
+
+class EntryString(Entry):
+ def __init__(self, type, name, tag):
+ # Init base class
+ Entry.__init__(self, type, name, tag)
+
+ self._can_be_array = 1
+ self._ctype = 'char *'
+
+ def GetInitializer(self):
+ return "NULL"
+
+ def CodeArrayFree(self, varname):
+ code = [
+ 'if (%(var)s != NULL) free(%(var)s);' ]
+
+ return TranslateList(code, { 'var' : varname })
+
+ def CodeArrayAssign(self, varname, srcvar):
+ code = [
+ 'if (%(var)s != NULL)',
+ ' free(%(var)s);',
+ '%(var)s = strdup(%(srcvar)s);',
+ 'if (%(var)s == NULL) {',
+ ' event_warnx("%%s: strdup", __func__);',
+ ' return (-1);',
+ '}' ]
+
+ return TranslateList(code, { 'var' : varname,
+ 'srcvar' : srcvar })
+
+ def CodeArrayAdd(self, varname, value):
+ code = [
+ 'if (%(value)s != NULL) {',
+ ' %(var)s = strdup(%(value)s);',
+ ' if (%(var)s == NULL) {',
+ ' goto error;',
+ ' }',
+ '} else {',
+ ' %(var)s = NULL;',
+ '}' ]
+
+ return TranslateList(code, { 'var' : varname,
+ 'value' : value })
+
+ def GetVarLen(self, var):
+ return 'strlen(%s)' % self.GetVarName(var)
+
+ def CodeMakeInitalize(self, varname):
+ return '%(varname)s = NULL;' % { 'varname' : varname }
+
+ def CodeAssign(self):
+ name = self._name
+ code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+ const %(ctype)s value)
+{
+ if (msg->%(name)s_data != NULL)
+ free(msg->%(name)s_data);
+ if ((msg->%(name)s_data = strdup(value)) == NULL)
+ return (-1);
+ msg->%(name)s_set = 1;
+ return (0);
+}""" % self.GetTranslation()
+
+ return code.split('\n')
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ code = ['if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {',
+ ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
+ ' return (-1);',
+ '}'
+ ]
+ code = '\n'.join(code) % self.GetTranslation({
+ 'buf' : buf,
+ 'tag' : tag_name,
+ 'var' : var_name })
+ return code.split('\n')
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = ['evtag_marshal_string(%s, %s, %s);' % (
+ buf, tag_name, var_name)]
+ return code
+
+ def CodeClear(self, structname):
+ code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+ ' free(%s->%s_data);' % (structname, self.Name()),
+ ' %s->%s_data = NULL;' % (structname, self.Name()),
+ ' %s->%s_set = 0;' % (structname, self.Name()),
+ '}'
+ ]
+
+ return code
+
+ def CodeInitialize(self, name):
+ code = ['%s->%s_data = NULL;' % (name, self._name)]
+ return code
+
+ def CodeFree(self, name):
+ code = ['if (%s->%s_data != NULL)' % (name, self._name),
+ ' free (%s->%s_data);' % (name, self._name)]
+
+ return code
+
+ def Declaration(self):
+ dcl = ['char *%s_data;' % self._name]
+
+ return dcl
+
+class EntryStruct(Entry):
+ def __init__(self, type, name, tag, refname):
+ # Init base class
+ Entry.__init__(self, type, name, tag)
+
+ self._optpointer = False
+ self._can_be_array = 1
+ self._refname = refname
+ self._ctype = 'struct %s*' % refname
+ self._optaddarg = False
+
+ def GetInitializer(self):
+ return "NULL"
+
+ def GetVarLen(self, var):
+ return '-1'
+
+ def CodeArrayAdd(self, varname, value):
+ code = [
+ '%(varname)s = %(refname)s_new();',
+ 'if (%(varname)s == NULL)',
+ ' goto error;' ]
+
+ return TranslateList(code, self.GetTranslation({ 'varname' : varname }))
+
+ def CodeArrayFree(self, var):
+ code = [ '%(refname)s_free(%(var)s);' % self.GetTranslation(
+ { 'var' : var }) ]
+ return code
+
+ def CodeArrayAssign(self, var, srcvar):
+ code = [
+ 'int had_error = 0;',
+ 'struct evbuffer *tmp = NULL;',
+ '%(refname)s_clear(%(var)s);',
+ 'if ((tmp = evbuffer_new()) == NULL) {',
+ ' event_warn("%%s: evbuffer_new()", __func__);',
+ ' had_error = 1;',
+ ' goto done;',
+ '}',
+ '%(refname)s_marshal(tmp, %(srcvar)s);',
+ 'if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {',
+ ' event_warnx("%%s: %(refname)s_unmarshal", __func__);',
+ ' had_error = 1;',
+ ' goto done;',
+ '}',
+            'done:',
+ 'if (tmp != NULL)',
+ ' evbuffer_free(tmp);',
+ 'if (had_error) {',
+ ' %(refname)s_clear(%(var)s);',
+ ' return (-1);',
+ '}' ]
+
+ return TranslateList(code, self.GetTranslation({
+ 'var' : var,
+ 'srcvar' : srcvar}))
+
+ def CodeGet(self):
+ name = self._name
+ code = [ 'int',
+ '%s_%s_get(struct %s *msg, %s *value)' % (
+ self._struct.Name(), name,
+ self._struct.Name(), self._ctype),
+ '{',
+ ' if (msg->%s_set != 1) {' % name,
+ ' msg->%s_data = %s_new();' % (name, self._refname),
+ ' if (msg->%s_data == NULL)' % name,
+ ' return (-1);',
+ ' msg->%s_set = 1;' % name,
+ ' }',
+ ' *value = msg->%s_data;' % name,
+ ' return (0);',
+ '}' ]
+ return code
+
+ def CodeAssign(self):
+ name = self._name
+ code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+ const %(ctype)s value)
+{
+ struct evbuffer *tmp = NULL;
+ if (msg->%(name)s_set) {
+ %(refname)s_clear(msg->%(name)s_data);
+ msg->%(name)s_set = 0;
+ } else {
+ msg->%(name)s_data = %(refname)s_new();
+ if (msg->%(name)s_data == NULL) {
+ event_warn("%%s: %(refname)s_new()", __func__);
+ goto error;
+ }
+ }
+ if ((tmp = evbuffer_new()) == NULL) {
+ event_warn("%%s: evbuffer_new()", __func__);
+ goto error;
+ }
+ %(refname)s_marshal(tmp, value);
+ if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
+ event_warnx("%%s: %(refname)s_unmarshal", __func__);
+ goto error;
+ }
+ msg->%(name)s_set = 1;
+ evbuffer_free(tmp);
+ return (0);
+ error:
+ if (tmp != NULL)
+ evbuffer_free(tmp);
+ if (msg->%(name)s_data != NULL) {
+ %(refname)s_free(msg->%(name)s_data);
+ msg->%(name)s_data = NULL;
+ }
+ return (-1);
+}""" % self.GetTranslation()
+ return code.split('\n')
+
+ def CodeComplete(self, structname, var_name):
+ code = [ 'if (%(structname)s->%(name)s_set && '
+ '%(refname)s_complete(%(var)s) == -1)',
+ ' return (-1);' ]
+
+ return TranslateList(code, self.GetTranslation({
+ 'structname' : structname,
+ 'var' : var_name }))
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ code = ['%(var)s = %(refname)s_new();',
+ 'if (%(var)s == NULL)',
+ ' return (-1);',
+ 'if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, '
+ '%(var)s) == -1) {',
+ ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
+ ' return (-1);',
+ '}'
+ ]
+ code = '\n'.join(code) % self.GetTranslation({
+ 'buf' : buf,
+ 'tag' : tag_name,
+ 'var' : var_name })
+ return code.split('\n')
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = ['evtag_marshal_%s(%s, %s, %s);' % (
+ self._refname, buf, tag_name, var_name)]
+ return code
+
+ def CodeClear(self, structname):
+ code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+ ' %s_free(%s->%s_data);' % (
+ self._refname, structname, self.Name()),
+ ' %s->%s_data = NULL;' % (structname, self.Name()),
+ ' %s->%s_set = 0;' % (structname, self.Name()),
+ '}'
+ ]
+
+ return code
+
+ def CodeInitialize(self, name):
+ code = ['%s->%s_data = NULL;' % (name, self._name)]
+ return code
+
+ def CodeFree(self, name):
+ code = ['if (%s->%s_data != NULL)' % (name, self._name),
+ ' %s_free(%s->%s_data);' % (
+ self._refname, name, self._name)]
+
+ return code
+
+ def Declaration(self):
+ dcl = ['%s %s_data;' % (self._ctype, self._name)]
+
+ return dcl
+
+class EntryVarBytes(Entry):
+ def __init__(self, type, name, tag):
+ # Init base class
+ Entry.__init__(self, type, name, tag)
+
+ self._ctype = 'ev_uint8_t *'
+
+ def GetInitializer(self):
+ return "NULL"
+
+ def GetVarLen(self, var):
+ return '%(var)s->%(name)s_length' % self.GetTranslation({ 'var' : var })
+
+ def CodeArrayAdd(self, varname, value):
+ # xxx: copy
+ return [ '%(varname)s = NULL;' % { 'varname' : varname } ]
+
+ def GetDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, %s *, ev_uint32_t *);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def AssignDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, const %s, ev_uint32_t);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def CodeAssign(self):
+ name = self._name
+ code = [ 'int',
+ '%s_%s_assign(struct %s *msg, '
+ 'const %s value, ev_uint32_t len)' % (
+ self._struct.Name(), name,
+ self._struct.Name(), self._ctype),
+ '{',
+ ' if (msg->%s_data != NULL)' % name,
+ ' free (msg->%s_data);' % name,
+ ' msg->%s_data = malloc(len);' % name,
+ ' if (msg->%s_data == NULL)' % name,
+ ' return (-1);',
+ ' msg->%s_set = 1;' % name,
+ ' msg->%s_length = len;' % name,
+ ' memcpy(msg->%s_data, value, len);' % name,
+ ' return (0);',
+ '}' ]
+ return code
+
+ def CodeGet(self):
+ name = self._name
+ code = [ 'int',
+ '%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)' % (
+ self._struct.Name(), name,
+ self._struct.Name(), self._ctype),
+ '{',
+ ' if (msg->%s_set != 1)' % name,
+ ' return (-1);',
+ ' *value = msg->%s_data;' % name,
+ ' *plen = msg->%s_length;' % name,
+ ' return (0);',
+ '}' ]
+ return code
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ code = ['if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)',
+ ' return (-1);',
+ # We do not want DoS opportunities
+ 'if (%(varlen)s > evbuffer_get_length(%(buf)s))',
+ ' return (-1);',
+ 'if ((%(var)s = malloc(%(varlen)s)) == NULL)',
+ ' return (-1);',
+ 'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, '
+ '%(varlen)s) == -1) {',
+ ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
+ ' return (-1);',
+ '}'
+ ]
+ code = '\n'.join(code) % self.GetTranslation({
+ 'buf' : buf,
+ 'tag' : tag_name,
+ 'var' : var_name,
+ 'varlen' : var_len })
+ return code.split('\n')
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = ['evtag_marshal(%s, %s, %s, %s);' % (
+ buf, tag_name, var_name, var_len)]
+ return code
+
+ def CodeClear(self, structname):
+ code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+ ' free (%s->%s_data);' % (structname, self.Name()),
+ ' %s->%s_data = NULL;' % (structname, self.Name()),
+ ' %s->%s_length = 0;' % (structname, self.Name()),
+ ' %s->%s_set = 0;' % (structname, self.Name()),
+ '}'
+ ]
+
+ return code
+
+ def CodeInitialize(self, name):
+ code = ['%s->%s_data = NULL;' % (name, self._name),
+ '%s->%s_length = 0;' % (name, self._name) ]
+ return code
+
+ def CodeFree(self, name):
+ code = ['if (%s->%s_data != NULL)' % (name, self._name),
+ ' free(%s->%s_data);' % (name, self._name)]
+
+ return code
+
+ def Declaration(self):
+ dcl = ['ev_uint8_t *%s_data;' % self._name,
+ 'ev_uint32_t %s_length;' % self._name]
+
+ return dcl
+
+class EntryArray(Entry):
+ def __init__(self, entry):
+ # Init base class
+ Entry.__init__(self, entry._type, entry._name, entry._tag)
+
+ self._entry = entry
+ self._refname = entry._refname
+ self._ctype = self._entry._ctype
+ self._optional = True
+ self._optpointer = self._entry._optpointer
+ self._optaddarg = self._entry._optaddarg
+
+ # provide a new function for accessing the variable name
+ def GetVarName(var_name):
+ return '%(var)s->%(name)s_data[%(index)s]' % \
+ self._entry.GetTranslation({'var' : var_name,
+ 'index' : self._index})
+ self._entry.GetVarName = GetVarName
+
+ def GetInitializer(self):
+ return "NULL"
+
+ def GetVarName(self, var_name):
+ return var_name
+
+ def GetVarLen(self, var_name):
+ return '-1'
+
+ def GetDeclaration(self, funcname):
+ """Allows direct access to elements of the array."""
+ code = [
+ 'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
+ self.GetTranslation({ 'funcname' : funcname }) ]
+ return code
+
+ def AssignDeclaration(self, funcname):
+ code = [ 'int %s(struct %s *, int, const %s);' % (
+ funcname, self._struct.Name(), self._ctype ) ]
+ return code
+
+ def AddDeclaration(self, funcname):
+ code = [
+ '%(ctype)s %(optpointer)s '
+ '%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);' % \
+ self.GetTranslation({ 'funcname' : funcname }) ]
+ return code
+
+ def CodeGet(self):
+ code = """int
+%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
+ %(ctype)s *value)
+{
+ if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
+ return (-1);
+ *value = msg->%(name)s_data[offset];
+ return (0);
+}""" % self.GetTranslation()
+
+ return code.split('\n')
+
+ def CodeAssign(self):
+ code = [
+ 'int',
+ '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,',
+ ' const %(ctype)s value)',
+ '{',
+ ' if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)',
+ ' return (-1);\n',
+ ' {' ]
+ code = TranslateList(code, self.GetTranslation())
+
+ codearrayassign = self._entry.CodeArrayAssign(
+ 'msg->%(name)s_data[off]' % self.GetTranslation(), 'value')
+ code += map(lambda x: ' ' + x, codearrayassign)
+
+ code += TranslateList([
+ ' }',
+ ' return (0);',
+ '}' ], self.GetTranslation())
+
+ return code
+
+ def CodeAdd(self):
+ codearrayadd = self._entry.CodeArrayAdd(
+ 'msg->%(name)s_data[msg->%(name)s_length - 1]' % self.GetTranslation(),
+ 'value')
+ code = [
+ 'static int',
+ '%(parent_name)s_%(name)s_expand_to_hold_more('
+ 'struct %(parent_name)s *msg)',
+ '{',
+ ' int tobe_allocated = msg->%(name)s_num_allocated;',
+ ' %(ctype)s* new_data = NULL;',
+ ' tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;',
+ ' new_data = (%(ctype)s*) realloc(msg->%(name)s_data,',
+ ' tobe_allocated * sizeof(%(ctype)s));',
+ ' if (new_data == NULL)',
+ ' return -1;',
+ ' msg->%(name)s_data = new_data;',
+ ' msg->%(name)s_num_allocated = tobe_allocated;',
+            '  return 0;',
+ '}',
+ '',
+ '%(ctype)s %(optpointer)s',
+ '%(parent_name)s_%(name)s_add('
+ 'struct %(parent_name)s *msg%(optaddarg)s)',
+ '{',
+ ' if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {',
+ ' if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)',
+ ' goto error;',
+ ' }' ]
+
+ code = TranslateList(code, self.GetTranslation())
+
+ code += map(lambda x: ' ' + x, codearrayadd)
+
+ code += TranslateList([
+ ' msg->%(name)s_set = 1;',
+ ' return %(optreference)s(msg->%(name)s_data['
+ 'msg->%(name)s_length - 1]);',
+ 'error:',
+ ' --msg->%(name)s_length;',
+ ' return (NULL);',
+ '}' ], self.GetTranslation())
+
+ return code
+
+ def CodeComplete(self, structname, var_name):
+ self._index = 'i'
+ tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name))
+ # skip the whole loop if there is nothing to check
+ if not tmp:
+ return []
+
+ translate = self.GetTranslation({ 'structname' : structname })
+ code = [
+ '{',
+ ' int i;',
+ ' for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ]
+
+ code = TranslateList(code, translate)
+
+ code += map(lambda x: ' ' + x, tmp)
+
+ code += [
+ ' }',
+ '}' ]
+
+ return code
+
+ def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
+ translate = self.GetTranslation({ 'var' : var_name,
+ 'buf' : buf,
+ 'tag' : tag_name,
+ 'init' : self._entry.GetInitializer()})
+ code = [
+ 'if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&',
+ ' %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {',
+            '  event_warnx("%%s: failed to expand %(name)s array", __func__);',
+ ' return (-1);',
+ '}']
+
+ # the unmarshal code directly returns
+ code = TranslateList(code, translate)
+
+ self._index = '%(var)s->%(name)s_length' % translate
+ code += self._entry.CodeUnmarshal(buf, tag_name,
+ self._entry.GetVarName(var_name),
+ self._entry.GetVarLen(var_name))
+
+ code += [ '++%(var)s->%(name)s_length;' % translate ]
+
+ return code
+
+ def CodeMarshal(self, buf, tag_name, var_name, var_len):
+ code = ['{',
+ ' int i;',
+ ' for (i = 0; i < %(var)s->%(name)s_length; ++i) {' ]
+
+ self._index = 'i'
+ code += self._entry.CodeMarshal(buf, tag_name,
+ self._entry.GetVarName(var_name),
+ self._entry.GetVarLen(var_name))
+ code += [' }',
+ '}'
+ ]
+
+ code = "\n".join(code) % self.GetTranslation({ 'var' : var_name })
+
+ return code.split('\n')
+
+ def CodeClear(self, structname):
+ translate = self.GetTranslation({ 'structname' : structname })
+ codearrayfree = self._entry.CodeArrayFree(
+ '%(structname)s->%(name)s_data[i]' % self.GetTranslation(
+ { 'structname' : structname } ))
+
+ code = [ 'if (%(structname)s->%(name)s_set == 1) {' ]
+
+ if codearrayfree:
+ code += [
+ ' int i;',
+ ' for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ]
+
+ code = TranslateList(code, translate)
+
+ if codearrayfree:
+ code += map(lambda x: ' ' + x, codearrayfree)
+ code += [
+ ' }' ]
+
+ code += TranslateList([
+ ' free(%(structname)s->%(name)s_data);',
+ ' %(structname)s->%(name)s_data = NULL;',
+ ' %(structname)s->%(name)s_set = 0;',
+ ' %(structname)s->%(name)s_length = 0;',
+ ' %(structname)s->%(name)s_num_allocated = 0;',
+ '}'
+ ], translate)
+
+ return code
+
+ def CodeInitialize(self, name):
+ code = ['%s->%s_data = NULL;' % (name, self._name),
+ '%s->%s_length = 0;' % (name, self._name),
+ '%s->%s_num_allocated = 0;' % (name, self._name)]
+ return code
+
+ def CodeFree(self, structname):
+        code = self.CodeClear(structname)
+
+ code += TranslateList([
+ 'free(%(structname)s->%(name)s_data);' ],
+ self.GetTranslation({'structname' : structname }))
+
+ return code
+
+ def Declaration(self):
+ dcl = ['%s *%s_data;' % (self._ctype, self._name),
+ 'int %s_length;' % self._name,
+ 'int %s_num_allocated;' % self._name ]
+
+ return dcl
+
+def NormalizeLine(line):
+ global white
+ global cppcomment
+
+ line = cppcomment.sub('', line)
+ line = line.strip()
+ line = white.sub(' ', line)
+
+ return line
+
+def ProcessOneEntry(factory, newstruct, entry):
+ optional = 0
+ array = 0
+ entry_type = ''
+ name = ''
+ tag = ''
+ tag_set = None
+ separator = ''
+ fixed_length = ''
+
+ tokens = entry.split(' ')
+ while tokens:
+ token = tokens[0]
+ tokens = tokens[1:]
+
+ if not entry_type:
+ if not optional and token == 'optional':
+ optional = 1
+ continue
+
+ if not array and token == 'array':
+ array = 1
+ continue
+
+ if not entry_type:
+ entry_type = token
+ continue
+
+ if not name:
+ res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
+ if not res:
+ raise RpcGenError(
+ 'Cannot parse name: \"%s\" '
+ 'around line %d' % (entry, line_count))
+ name = res.group(1)
+ fixed_length = res.group(2)
+ if fixed_length:
+ fixed_length = fixed_length[1:-1]
+ continue
+
+ if not separator:
+ separator = token
+ if separator != '=':
+ raise RpcGenError('Expected "=" after name \"%s\" got %s'
+ % (name, token))
+ continue
+
+ if not tag_set:
+ tag_set = 1
+ if not re.match(r'^(0x)?[0-9]+$', token):
+ raise RpcGenError('Expected tag number: \"%s\"' % entry)
+ tag = int(token, 0)
+ continue
+
+ raise RpcGenError('Cannot parse \"%s\"' % entry)
+
+ if not tag_set:
+ raise RpcGenError('Need tag number: \"%s\"' % entry)
+
+ # Create the right entry
+ if entry_type == 'bytes':
+ if fixed_length:
+ newentry = factory.EntryBytes(entry_type, name, tag, fixed_length)
+ else:
+ newentry = factory.EntryVarBytes(entry_type, name, tag)
+ elif entry_type == 'int' and not fixed_length:
+ newentry = factory.EntryInt(entry_type, name, tag)
+ elif entry_type == 'int64' and not fixed_length:
+ newentry = factory.EntryInt(entry_type, name, tag, bits=64)
+ elif entry_type == 'string' and not fixed_length:
+ newentry = factory.EntryString(entry_type, name, tag)
+ else:
+ res = structref.match(entry_type)
+ if res:
+ # References another struct defined in our file
+ newentry = factory.EntryStruct(entry_type, name, tag, res.group(1))
+ else:
+ raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry))
+
+ structs = []
+
+ if optional:
+ newentry.MakeOptional()
+ if array:
+ newentry.MakeArray()
+
+ newentry.SetStruct(newstruct)
+ newentry.SetLineCount(line_count)
+ newentry.Verify()
+
+ if array:
+ # We need to encapsulate this entry into a struct
+ newname = newentry.Name()+ '_array'
+
+ # Now borgify the new entry.
+ newentry = factory.EntryArray(newentry)
+ newentry.SetStruct(newstruct)
+ newentry.SetLineCount(line_count)
+ newentry.MakeArray()
+
+ newstruct.AddEntry(newentry)
+
+ return structs
+
+def ProcessStruct(factory, data):
+ tokens = data.split(' ')
+
+ # First three tokens are: 'struct' 'name' '{'
+ newstruct = factory.Struct(tokens[1])
+
+ inside = ' '.join(tokens[3:-1])
+
+ tokens = inside.split(';')
+
+ structs = []
+
+ for entry in tokens:
+ entry = NormalizeLine(entry)
+ if not entry:
+ continue
+
+ # It's possible that new structs get defined in here
+ structs.extend(ProcessOneEntry(factory, newstruct, entry))
+
+ structs.append(newstruct)
+ return structs
+
+def GetNextStruct(file):
+ global line_count
+ global cppdirect
+
+ got_struct = 0
+
+ processed_lines = []
+
+ have_c_comment = 0
+ data = ''
+ while 1:
+ line = file.readline()
+ if not line:
+ break
+
+ line_count += 1
+ line = line[:-1]
+
+ if not have_c_comment and re.search(r'/\*', line):
+ if re.search(r'/\*.*?\*/', line):
+ line = re.sub(r'/\*.*?\*/', '', line)
+ else:
+ line = re.sub(r'/\*.*$', '', line)
+ have_c_comment = 1
+
+ if have_c_comment:
+ if not re.search(r'\*/', line):
+ continue
+ have_c_comment = 0
+ line = re.sub(r'^.*\*/', '', line)
+
+ line = NormalizeLine(line)
+
+ if not line:
+ continue
+
+ if not got_struct:
+ if re.match(r'#include ["<].*[>"]', line):
+ cppdirect.append(line)
+ continue
+
+ if re.match(r'^#(if( |def)|endif)', line):
+ cppdirect.append(line)
+ continue
+
+ if re.match(r'^#define', line):
+ headerdirect.append(line)
+ continue
+
+ if not structdef.match(line):
+ raise RpcGenError('Missing struct on line %d: %s'
+ % (line_count, line))
+ else:
+ got_struct = 1
+ data += line
+ continue
+
+ # We are inside the struct
+ tokens = line.split('}')
+ if len(tokens) == 1:
+ data += ' ' + line
+ continue
+
+ if len(tokens[1]):
+ raise RpcGenError('Trailing garbage after struct on line %d'
+ % line_count)
+
+ # We found the end of the struct
+ data += ' %s}' % tokens[0]
+ break
+
+ # Remove any comments, that might be in there
+ data = re.sub(r'/\*.*\*/', '', data)
+
+ return data
+
+
+def Parse(factory, file):
+ """
+ Parses the input file and returns C code and corresponding header file.
+ """
+
+ entities = []
+
+ while 1:
+ # Just gets the whole struct nicely formatted
+ data = GetNextStruct(file)
+
+ if not data:
+ break
+
+ entities.extend(ProcessStruct(factory, data))
+
+ return entities
+
+class CCodeGenerator:
+ def __init__(self):
+ pass
+
+ def GuardName(self, name):
+ # Use the complete provided path to the input file, with all
+ # non-identifier characters replaced with underscores, to
+ # reduce the chance of a collision between guard macros.
+ return 'EVENT_RPCOUT_' + nonident.sub('_', name).upper() + '_'
+
+ def HeaderPreamble(self, name):
+ guard = self.GuardName(name)
+ pre = (
+ '/*\n'
+ ' * Automatically generated from %s\n'
+ ' */\n\n'
+ '#ifndef %s\n'
+ '#define %s\n\n' ) % (
+ name, guard, guard)
+
+ for statement in headerdirect:
+ pre += '%s\n' % statement
+ if headerdirect:
+ pre += '\n'
+
+ pre += (
+ '#include <event2/util.h> /* for ev_uint*_t */\n'
+ '#include <event2/rpc.h>\n'
+ )
+
+ return pre
+
+ def HeaderPostamble(self, name):
+ guard = self.GuardName(name)
+ return '#endif /* %s */' % guard
+
+ def BodyPreamble(self, name, header_file):
+ global _NAME
+ global _VERSION
+
+ slash = header_file.rfind('/')
+ if slash != -1:
+ header_file = header_file[slash+1:]
+
+ pre = ( '/*\n'
+ ' * Automatically generated from %s\n'
+ ' * by %s/%s. DO NOT EDIT THIS FILE.\n'
+ ' */\n\n' ) % (name, _NAME, _VERSION)
+ pre += ( '#include <stdlib.h>\n'
+ '#include <string.h>\n'
+ '#include <assert.h>\n'
+ '#include <event2/event-config.h>\n'
+ '#include <event2/event.h>\n'
+ '#include <event2/buffer.h>\n'
+ '#include <event2/tag.h>\n\n'
+ '#ifdef EVENT____func__\n'
+ '#define __func__ EVENT____func__\n'
+ '#endif\n\n'
+ )
+
+ for statement in cppdirect:
+ pre += '%s\n' % statement
+
+ pre += '\n#include "%s"\n\n' % header_file
+
+ pre += 'void event_warn(const char *fmt, ...);\n'
+ pre += 'void event_warnx(const char *fmt, ...);\n\n'
+
+ return pre
+
+ def HeaderFilename(self, filename):
+ return '.'.join(filename.split('.')[:-1]) + '.h'
+
+ def CodeFilename(self, filename):
+ return '.'.join(filename.split('.')[:-1]) + '.gen.c'
+
+ def Struct(self, name):
+ return StructCCode(name)
+
+ def EntryBytes(self, entry_type, name, tag, fixed_length):
+ return EntryBytes(entry_type, name, tag, fixed_length)
+
+ def EntryVarBytes(self, entry_type, name, tag):
+ return EntryVarBytes(entry_type, name, tag)
+
+ def EntryInt(self, entry_type, name, tag, bits=32):
+ return EntryInt(entry_type, name, tag, bits)
+
+ def EntryString(self, entry_type, name, tag):
+ return EntryString(entry_type, name, tag)
+
+ def EntryStruct(self, entry_type, name, tag, struct_name):
+ return EntryStruct(entry_type, name, tag, struct_name)
+
+ def EntryArray(self, entry):
+ return EntryArray(entry)
+
+class Usage(RpcGenError):
+ def __init__(self, argv0):
+ RpcGenError.__init__("usage: %s input.rpc [[output.h] output.c]"
+ % argv0)
+
+class CommandLine:
+ def __init__(self, argv):
+ """Initialize a command-line to launch event_rpcgen, as if
+ from a command-line with CommandLine(sys.argv). If you're
+ calling this directly, remember to provide a dummy value
+ for sys.argv[0]
+ """
+ self.filename = None
+ self.header_file = None
+ self.impl_file = None
+ self.factory = CCodeGenerator()
+
+ if len(argv) >= 2 and argv[1] == '--quiet':
+ global QUIETLY
+ QUIETLY = 1
+ del argv[1]
+
+ if len(argv) < 2 or len(argv) > 4:
+ raise Usage(argv[0])
+
+ self.filename = argv[1].replace('\\', '/')
+ if len(argv) == 3:
+ self.impl_file = argv[2].replace('\\', '/')
+ if len(argv) == 4:
+ self.header_file = argv[2].replace('\\', '/')
+ self.impl_file = argv[3].replace('\\', '/')
+
+ if not self.filename:
+ raise Usage(argv[0])
+
+ if not self.impl_file:
+ self.impl_file = self.factory.CodeFilename(self.filename)
+
+ if not self.header_file:
+ self.header_file = self.factory.HeaderFilename(self.impl_file)
+
+ if not self.impl_file.endswith('.c'):
+ raise RpcGenError("can only generate C implementation files")
+ if not self.header_file.endswith('.h'):
+ raise RpcGenError("can only generate C header files")
+
+ def run(self):
+ filename = self.filename
+ header_file = self.header_file
+ impl_file = self.impl_file
+ factory = self.factory
+
+ declare('Reading \"%s\"' % filename)
+
+ fp = open(filename, 'r')
+ entities = Parse(factory, fp)
+ fp.close()
+
+ declare('... creating "%s"' % header_file)
+ header_fp = open(header_file, 'w')
+ print >>header_fp, factory.HeaderPreamble(filename)
+
+ # Create forward declarations: allows other structs to reference
+ # each other
+ for entry in entities:
+ entry.PrintForwardDeclaration(header_fp)
+ print >>header_fp, ''
+
+ for entry in entities:
+ entry.PrintTags(header_fp)
+ entry.PrintDeclaration(header_fp)
+ print >>header_fp, factory.HeaderPostamble(filename)
+ header_fp.close()
+
+ declare('... creating "%s"' % impl_file)
+ impl_fp = open(impl_file, 'w')
+ print >>impl_fp, factory.BodyPreamble(filename, header_file)
+ for entry in entities:
+ entry.PrintCode(impl_fp)
+ impl_fp.close()
+
+if __name__ == '__main__':
+ try:
+ CommandLine(sys.argv).run()
+ sys.exit(0)
+
+ except RpcGenError, e:
+ print >>sys.stderr, e
+ sys.exit(1)
+
+ except EnvironmentError, e:
+ if e.filename and e.strerror:
+ print >>sys.stderr, "%s: %s" % (e.filename, e.strerror)
+ sys.exit(1)
+ elif e.strerror:
+ print >> sys.stderr, e.strerror
+ sys.exit(1)
+ else:
+ raise
diff --git a/libs/libevent/docs/evport.c b/libs/libevent/docs/evport.c
new file mode 100644
index 0000000000..a014386bfe
--- /dev/null
+++ b/libs/libevent/docs/evport.c
@@ -0,0 +1,451 @@
+/*
+ * Submitted by David Pacheco (dp.spambait@gmail.com)
+ *
+ * Copyright 2006-2007 Niels Provos
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2007 Sun Microsystems. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * evport.c: event backend using Solaris 10 event ports. See port_create(3C).
+ * This implementation is loosely modeled after the one used for select(2) (in
+ * select.c).
+ *
+ * The outstanding events are tracked in a data structure called evport_data.
+ * Each entry in the ed_fds array corresponds to a file descriptor, and contains
+ * pointers to the read and write events that correspond to that fd. (That is,
+ * when the file is readable, the "read" event should handle it, etc.)
+ *
+ * evport_add and evport_del update this data structure. evport_dispatch uses it
+ * to determine where to callback when an event occurs (which it gets from
+ * port_getn).
+ *
+ * Helper functions are used: grow() grows the file descriptor array as
+ * necessary when large fd's come in. reassociate() takes care of maintaining
+ * the proper file-descriptor/event-port associations.
+ *
+ * As in the select(2) implementation, signals are handled by evsignal.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_EVENT_PORTS
+
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <poll.h>
+#include <port.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "event2/thread.h"
+
+#include "evthread-internal.h"
+#include "event-internal.h"
+#include "log-internal.h"
+#include "evsignal-internal.h"
+#include "evmap-internal.h"
+
+#define INITIAL_EVENTS_PER_GETN 8
+#define MAX_EVENTS_PER_GETN 4096
+
+/*
+ * Per-file-descriptor information about what events we're subscribed to. These
+ * fields are NULL if no event is subscribed to either of them.
+ */
+
+struct fd_info {
+ /* combinations of EV_READ and EV_WRITE */
+ short fdi_what;
+ /* Index of this fd within ed_pending, plus 1. Zero if this fd is
+ * not in ed_pending. (The +1 is a hack so that memset(0) will set
+	 * it to a nil index.) */
+ int pending_idx_plus_1;
+};
+
+#define FDI_HAS_READ(fdi) ((fdi)->fdi_what & EV_READ)
+#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_what & EV_WRITE)
+#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
+#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
+ (FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
+
+struct evport_data {
+ int ed_port; /* event port for system events */
+ /* How many elements of ed_pending should we look at? */
+ int ed_npending;
+ /* How many elements are allocated in ed_pending and pevtlist? */
+ int ed_maxevents;
+ /* fdi's that we need to reassoc */
+ int *ed_pending;
+ /* storage space for incoming events. */
+ port_event_t *ed_pevtlist;
+
+};
+
+static void* evport_init(struct event_base *);
+static int evport_add(struct event_base *, int fd, short old, short events, void *);
+static int evport_del(struct event_base *, int fd, short old, short events, void *);
+static int evport_dispatch(struct event_base *, struct timeval *);
+static void evport_dealloc(struct event_base *);
+static int grow(struct evport_data *, int min_events);
+
+const struct eventop evportops = {
+ "evport",
+ evport_init,
+ evport_add,
+ evport_del,
+ evport_dispatch,
+ evport_dealloc,
+ 1, /* need reinit */
+ 0, /* features */
+ sizeof(struct fd_info), /* fdinfo length */
+};
+
+/*
+ * Initialize the event port implementation.
+ */
+
+static void*
+evport_init(struct event_base *base)
+{
+ struct evport_data *evpd;
+
+ if (!(evpd = mm_calloc(1, sizeof(struct evport_data))))
+ return (NULL);
+
+ if ((evpd->ed_port = port_create()) == -1) {
+ mm_free(evpd);
+ return (NULL);
+ }
+
+ if (grow(evpd, INITIAL_EVENTS_PER_GETN) < 0) {
+ close(evpd->ed_port);
+ mm_free(evpd);
+ return NULL;
+ }
+
+ evpd->ed_npending = 0;
+
+ evsig_init_(base);
+
+ return (evpd);
+}
+
+static int
+grow(struct evport_data *data, int min_events)
+{
+ int newsize;
+ int *new_pending;
+ port_event_t *new_pevtlist;
+ if (data->ed_maxevents) {
+ newsize = data->ed_maxevents;
+ do {
+ newsize *= 2;
+ } while (newsize < min_events);
+ } else {
+ newsize = min_events;
+ }
+
+ new_pending = mm_realloc(data->ed_pending, sizeof(int)*newsize);
+ if (new_pending == NULL)
+ return -1;
+ data->ed_pending = new_pending;
+ new_pevtlist = mm_realloc(data->ed_pevtlist, sizeof(port_event_t)*newsize);
+ if (new_pevtlist == NULL)
+ return -1;
+ data->ed_pevtlist = new_pevtlist;
+
+ data->ed_maxevents = newsize;
+ return 0;
+}
+
+#ifdef CHECK_INVARIANTS
+/*
+ * Checks some basic properties about the evport_data structure. Because it
+ * checks all file descriptors, this function can be expensive when the maximum
+ * file descriptor ever used is rather large.
+ */
+
+static void
+check_evportop(struct evport_data *evpd)
+{
+ EVUTIL_ASSERT(evpd);
+ EVUTIL_ASSERT(evpd->ed_port > 0);
+}
+
+/*
+ * Verifies very basic integrity of a given port_event.
+ */
+static void
+check_event(port_event_t* pevt)
+{
+ /*
+ * We've only registered for PORT_SOURCE_FD events. The only
+ * other thing we can legitimately receive is PORT_SOURCE_ALERT,
+ * but since we're not using port_alert either, we can assume
+ * PORT_SOURCE_FD.
+ */
+ EVUTIL_ASSERT(pevt->portev_source == PORT_SOURCE_FD);
+}
+
+#else
+#define check_evportop(epop)
+#define check_event(pevt)
+#endif /* CHECK_INVARIANTS */
+
+/*
+ * (Re)associates the given file descriptor with the event port. The OS events
+ * are specified (implicitly) from the fd_info struct.
+ */
+static int
+reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
+{
+ int sysevents = FDI_TO_SYSEVENTS(fdip);
+
+ if (sysevents != 0) {
+ if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
+ fd, sysevents, fdip) == -1) {
+ event_warn("port_associate");
+ return (-1);
+ }
+ }
+
+ check_evportop(epdp);
+
+ return (0);
+}
+
+/*
+ * Main event loop - polls port_getn for some number of events, and processes
+ * them.
+ */
+
+static int
+evport_dispatch(struct event_base *base, struct timeval *tv)
+{
+ int i, res;
+ struct evport_data *epdp = base->evbase;
+ port_event_t *pevtlist = epdp->ed_pevtlist;
+
+ /*
+ * port_getn will block until it has at least nevents events. It will
+ * also return how many it's given us (which may be more than we asked
+ * for, as long as it's less than our maximum (ed_maxevents)) in
+ * nevents.
+ */
+ int nevents = 1;
+
+ /*
+ * We have to convert a struct timeval to a struct timespec
+ * (only difference is nanoseconds vs. microseconds). If no time-based
+ * events are active, we should wait for I/O (and tv == NULL).
+ */
+ struct timespec ts;
+ struct timespec *ts_p = NULL;
+ if (tv != NULL) {
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec * 1000;
+ ts_p = &ts;
+ }
+
+ /*
+ * Before doing anything else, we need to reassociate the events we hit
+ * last time which need reassociation. See comment at the end of the
+ * loop below.
+ */
+ for (i = 0; i < epdp->ed_npending; ++i) {
+ struct fd_info *fdi = NULL;
+ const int fd = epdp->ed_pending[i];
+ if (fd != -1) {
+ /* We might have cleared out this event; we need
+ * to be sure that it's still set. */
+ fdi = evmap_io_get_fdinfo_(&base->io, fd);
+ }
+
+ if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
+ reassociate(epdp, fdi, fd);
+ /* epdp->ed_pending[i] = -1; */
+ fdi->pending_idx_plus_1 = 0;
+ }
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = port_getn(epdp->ed_port, pevtlist, epdp->ed_maxevents,
+ (unsigned int *) &nevents, ts_p);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (res == -1) {
+ if (errno == EINTR || errno == EAGAIN) {
+ return (0);
+ } else if (errno == ETIME) {
+ if (nevents == 0)
+ return (0);
+ } else {
+ event_warn("port_getn");
+ return (-1);
+ }
+ }
+
+ event_debug(("%s: port_getn reports %d events", __func__, nevents));
+
+ for (i = 0; i < nevents; ++i) {
+ port_event_t *pevt = &pevtlist[i];
+ int fd = (int) pevt->portev_object;
+ struct fd_info *fdi = pevt->portev_user;
+ /*EVUTIL_ASSERT(evmap_io_get_fdinfo_(&base->io, fd) == fdi);*/
+
+ check_evportop(epdp);
+ check_event(pevt);
+ epdp->ed_pending[i] = fd;
+ fdi->pending_idx_plus_1 = i + 1;
+
+ /*
+ * Figure out what kind of event it was
+ * (because we have to pass this to the callback)
+ */
+ res = 0;
+ if (pevt->portev_events & (POLLERR|POLLHUP)) {
+ res = EV_READ | EV_WRITE;
+ } else {
+ if (pevt->portev_events & POLLIN)
+ res |= EV_READ;
+ if (pevt->portev_events & POLLOUT)
+ res |= EV_WRITE;
+ }
+
+ /*
+ * Check for the error situations or a hangup situation
+ */
+ if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
+ res |= EV_READ|EV_WRITE;
+
+ evmap_io_active_(base, fd, res);
+ } /* end of all events gotten */
+ epdp->ed_npending = nevents;
+
+ if (nevents == epdp->ed_maxevents &&
+ epdp->ed_maxevents < MAX_EVENTS_PER_GETN) {
+ /* we used all the space this time. We should be ready
+ * for more events next time around. */
+ grow(epdp, epdp->ed_maxevents * 2);
+ }
+
+ check_evportop(epdp);
+
+ return (0);
+}
+
+
+/*
+ * Adds the given event (so that you will be notified when it happens via
+ * the callback function).
+ */
+
+static int
+evport_add(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct evport_data *evpd = base->evbase;
+ struct fd_info *fdi = p;
+
+ check_evportop(evpd);
+
+ fdi->fdi_what |= events;
+
+ return reassociate(evpd, fdi, fd);
+}
+
+/*
+ * Removes the given event from the list of events to wait for.
+ */
+
+static int
+evport_del(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct evport_data *evpd = base->evbase;
+ struct fd_info *fdi = p;
+ int associated = ! fdi->pending_idx_plus_1;
+
+ check_evportop(evpd);
+
+ fdi->fdi_what &= ~(events &(EV_READ|EV_WRITE));
+
+ if (associated) {
+ if (!FDI_HAS_EVENTS(fdi) &&
+ port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) {
+ /*
+ * Ignore the EBADFD error; the fd could have been closed
+ * before event_del() was called.
+ */
+ if (errno != EBADFD) {
+ event_warn("port_dissociate");
+ return (-1);
+ }
+ } else {
+ if (FDI_HAS_EVENTS(fdi)) {
+ return (reassociate(evpd, fdi, fd));
+ }
+ }
+ } else {
+ if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) {
+ const int i = fdi->pending_idx_plus_1 - 1;
+ EVUTIL_ASSERT(evpd->ed_pending[i] == fd);
+ evpd->ed_pending[i] = -1;
+ fdi->pending_idx_plus_1 = 0;
+ }
+ }
+ return 0;
+}
+
+
+static void
+evport_dealloc(struct event_base *base)
+{
+ struct evport_data *evpd = base->evbase;
+
+ evsig_dealloc_(base);
+
+ close(evpd->ed_port);
+
+ if (evpd->ed_pending)
+ mm_free(evpd->ed_pending);
+ if (evpd->ed_pevtlist)
+ mm_free(evpd->ed_pevtlist);
+
+ mm_free(evpd);
+}
+
+#endif /* EVENT__HAVE_EVENT_PORTS */
diff --git a/libs/libevent/docs/evthread_pthread.c b/libs/libevent/docs/evthread_pthread.c
new file mode 100644
index 0000000000..4e11f74970
--- /dev/null
+++ b/libs/libevent/docs/evthread_pthread.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+/* With glibc we need to define _GNU_SOURCE to get PTHREAD_MUTEX_RECURSIVE.
+ * This comes from evconfig-private.h
+ */
+#include <pthread.h>
+
+struct event_base;
+#include "event2/thread.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include "mm-internal.h"
+#include "evthread-internal.h"
+
+static pthread_mutexattr_t attr_recursive;
+
+static void *
+evthread_posix_lock_alloc(unsigned locktype)
+{
+ pthread_mutexattr_t *attr = NULL;
+ pthread_mutex_t *lock = mm_malloc(sizeof(pthread_mutex_t));
+ if (!lock)
+ return NULL;
+ if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
+ attr = &attr_recursive;
+ if (pthread_mutex_init(lock, attr)) {
+ mm_free(lock);
+ return NULL;
+ }
+ return lock;
+}
+
+static void
+evthread_posix_lock_free(void *lock_, unsigned locktype)
+{
+ pthread_mutex_t *lock = lock_;
+ pthread_mutex_destroy(lock);
+ mm_free(lock);
+}
+
+static int
+evthread_posix_lock(unsigned mode, void *lock_)
+{
+ pthread_mutex_t *lock = lock_;
+ if (mode & EVTHREAD_TRY)
+ return pthread_mutex_trylock(lock);
+ else
+ return pthread_mutex_lock(lock);
+}
+
+static int
+evthread_posix_unlock(unsigned mode, void *lock_)
+{
+ pthread_mutex_t *lock = lock_;
+ return pthread_mutex_unlock(lock);
+}
+
+static unsigned long
+evthread_posix_get_id(void)
+{
+ union {
+ pthread_t thr;
+#if EVENT__SIZEOF_PTHREAD_T > EVENT__SIZEOF_LONG
+ ev_uint64_t id;
+#else
+ unsigned long id;
+#endif
+ } r;
+#if EVENT__SIZEOF_PTHREAD_T < EVENT__SIZEOF_LONG
+ memset(&r, 0, sizeof(r));
+#endif
+ r.thr = pthread_self();
+ return (unsigned long)r.id;
+}
+
+static void *
+evthread_posix_cond_alloc(unsigned condflags)
+{
+ pthread_cond_t *cond = mm_malloc(sizeof(pthread_cond_t));
+ if (!cond)
+ return NULL;
+ if (pthread_cond_init(cond, NULL)) {
+ mm_free(cond);
+ return NULL;
+ }
+ return cond;
+}
+
+static void
+evthread_posix_cond_free(void *cond_)
+{
+ pthread_cond_t *cond = cond_;
+ pthread_cond_destroy(cond);
+ mm_free(cond);
+}
+
+static int
+evthread_posix_cond_signal(void *cond_, int broadcast)
+{
+ pthread_cond_t *cond = cond_;
+ int r;
+ if (broadcast)
+ r = pthread_cond_broadcast(cond);
+ else
+ r = pthread_cond_signal(cond);
+ return r ? -1 : 0;
+}
+
+static int
+evthread_posix_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ int r;
+ pthread_cond_t *cond = cond_;
+ pthread_mutex_t *lock = lock_;
+
+ if (tv) {
+ struct timeval now, abstime;
+ struct timespec ts;
+ evutil_gettimeofday(&now, NULL);
+ evutil_timeradd(&now, tv, &abstime);
+ ts.tv_sec = abstime.tv_sec;
+ ts.tv_nsec = abstime.tv_usec*1000;
+ r = pthread_cond_timedwait(cond, lock, &ts);
+ if (r == ETIMEDOUT)
+ return 1;
+ else if (r)
+ return -1;
+ else
+ return 0;
+ } else {
+ r = pthread_cond_wait(cond, lock);
+ return r ? -1 : 0;
+ }
+}
+
+int
+evthread_use_pthreads(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ evthread_posix_lock_alloc,
+ evthread_posix_lock_free,
+ evthread_posix_lock,
+ evthread_posix_unlock
+ };
+ struct evthread_condition_callbacks cond_cbs = {
+ EVTHREAD_CONDITION_API_VERSION,
+ evthread_posix_cond_alloc,
+ evthread_posix_cond_free,
+ evthread_posix_cond_signal,
+ evthread_posix_cond_wait
+ };
+ /* Set ourselves up to get recursive locks. */
+ if (pthread_mutexattr_init(&attr_recursive))
+ return -1;
+ if (pthread_mutexattr_settype(&attr_recursive, PTHREAD_MUTEX_RECURSIVE))
+ return -1;
+
+ evthread_set_lock_callbacks(&cbs);
+ evthread_set_condition_callbacks(&cond_cbs);
+ evthread_set_id_callback(evthread_posix_get_id);
+ return 0;
+}
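+
+/*
+ * Illustrative usage sketch (not part of this file): an application that
+ * wants libevent's structures to be thread-safe would typically enable the
+ * pthreads callbacks before creating any event_base. evthread_use_pthreads()
+ * and the headers named here are real libevent API; the surrounding program
+ * is a hypothetical example.
+ *
+ *   #include <event2/thread.h>
+ *   #include <event2/event.h>
+ *
+ *   int main(void)
+ *   {
+ *       struct event_base *base;
+ *       if (evthread_use_pthreads() < 0)
+ *           return 1;
+ *       base = event_base_new();
+ *       ... add events and run event_base_dispatch(base) ...
+ *       event_base_free(base);
+ *       return 0;
+ *   }
+ */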
diff --git a/libs/libevent/docs/kqueue-internal.h b/libs/libevent/docs/kqueue-internal.h
new file mode 100644
index 0000000000..02c5a3606c
--- /dev/null
+++ b/libs/libevent/docs/kqueue-internal.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef KQUEUE_INTERNAL_H_INCLUDED_
+#define KQUEUE_INTERNAL_H_INCLUDED_
+
+/** Notification function, used to tell an event base to wake up from another
+ * thread. Only works when event_kq_add_notify_event_() has previously been
+ * called successfully on that base. */
+int event_kq_notify_base_(struct event_base *base);
+
+/** Prepare a kqueue-using event base to receive notifications via an internal
+ * EVFILT_USER event. Return 0 on success, -1 on failure.
+ */
+int event_kq_add_notify_event_(struct event_base *base);
+
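+/* Illustrative call order (a sketch based on the comments above, not code
+ * from this header): arm the notification event once on the base, then wake
+ * the loop from another thread as needed.
+ *
+ *   if (event_kq_add_notify_event_(base) == 0) {
+ *       ... later, from another thread ...
+ *       event_kq_notify_base_(base);
+ *   }
+ */
+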
+#endif
diff --git a/libs/libevent/docs/kqueue.c b/libs/libevent/docs/kqueue.c
new file mode 100644
index 0000000000..1f41b5a768
--- /dev/null
+++ b/libs/libevent/docs/kqueue.c
@@ -0,0 +1,567 @@
+/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
+
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_KQUEUE
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/event.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef EVENT__HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/* Some platforms apparently define the udata field of struct kevent as
+ * intptr_t, whereas others define it as void*. There doesn't seem to be an
+ * easy way to tell them apart via autoconf, so we need to use OS macros. */
+#if defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__)
+#define PTR_TO_UDATA(x) ((intptr_t)(x))
+#define INT_TO_UDATA(x) ((intptr_t)(x))
+#else
+#define PTR_TO_UDATA(x) (x)
+#define INT_TO_UDATA(x) ((void*)(x))
+#endif
+
+#include "event-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "changelist-internal.h"
+
+#include "kqueue-internal.h"
+
+#define NEVENT 64
+
+struct kqop {
+ struct kevent *changes;
+ int changes_size;
+
+ struct kevent *events;
+ int events_size;
+ int kq;
+ int notify_event_added;
+ pid_t pid;
+};
+
+static void kqop_free(struct kqop *kqop);
+
+static void *kq_init(struct event_base *);
+static int kq_sig_add(struct event_base *, int, short, short, void *);
+static int kq_sig_del(struct event_base *, int, short, short, void *);
+static int kq_dispatch(struct event_base *, struct timeval *);
+static void kq_dealloc(struct event_base *);
+
+const struct eventop kqops = {
+ "kqueue",
+ kq_init,
+ event_changelist_add_,
+ event_changelist_del_,
+ kq_dispatch,
+ kq_dealloc,
+ 1 /* need reinit */,
+ EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
+ EVENT_CHANGELIST_FDINFO_SIZE
+};
+
+static const struct eventop kqsigops = {
+ "kqueue_signal",
+ NULL,
+ kq_sig_add,
+ kq_sig_del,
+ NULL,
+ NULL,
+ 1 /* need reinit */,
+ 0,
+ 0
+};
+
+static void *
+kq_init(struct event_base *base)
+{
+ int kq = -1;
+ struct kqop *kqueueop = NULL;
+
+ if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
+ return (NULL);
+
+/* Initialize the kernel queue */
+
+ if ((kq = kqueue()) == -1) {
+ event_warn("kqueue");
+ goto err;
+ }
+
+ kqueueop->kq = kq;
+
+ kqueueop->pid = getpid();
+
+ /* Initialize fields */
+ kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
+ if (kqueueop->changes == NULL)
+ goto err;
+ kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
+ if (kqueueop->events == NULL)
+ goto err;
+ kqueueop->events_size = kqueueop->changes_size = NEVENT;
+
+ /* Check for Mac OS X kqueue bug. */
+ memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
+ kqueueop->changes[0].ident = -1;
+ kqueueop->changes[0].filter = EVFILT_READ;
+ kqueueop->changes[0].flags = EV_ADD;
+ /*
+ * If kqueue works, then kevent will succeed, and it will
+ * stick an error in events[0]. If kqueue is broken, then
+ * kevent will fail.
+ */
+ if (kevent(kq,
+ kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
+ (int)kqueueop->events[0].ident != -1 ||
+ kqueueop->events[0].flags != EV_ERROR) {
+ event_warn("%s: detected broken kqueue; not using.", __func__);
+ goto err;
+ }
+
+ base->evsigsel = &kqsigops;
+
+ return (kqueueop);
+err:
+ if (kqueueop)
+ kqop_free(kqueueop);
+
+ return (NULL);
+}
+
+#define ADD_UDATA 0x30303
+
+static void
+kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
+{
+ memset(out, 0, sizeof(struct kevent));
+ out->ident = fd;
+ out->filter = filter;
+
+ if (change & EV_CHANGE_ADD) {
+ out->flags = EV_ADD;
+ /* We set a magic number here so that we can tell 'add'
+ * errors from 'del' errors. */
+ out->udata = INT_TO_UDATA(ADD_UDATA);
+ if (change & EV_ET)
+ out->flags |= EV_CLEAR;
+#ifdef NOTE_EOF
+ /* Make it behave like select() and poll() */
+ if (filter == EVFILT_READ)
+ out->fflags = NOTE_EOF;
+#endif
+ } else {
+ EVUTIL_ASSERT(change & EV_CHANGE_DEL);
+ out->flags = EV_DELETE;
+ }
+}
+
+static int
+kq_build_changes_list(const struct event_changelist *changelist,
+ struct kqop *kqop)
+{
+ int i;
+ int n_changes = 0;
+
+ for (i = 0; i < changelist->n_changes; ++i) {
+ struct event_change *in_ch = &changelist->changes[i];
+ struct kevent *out_ch;
+ if (n_changes >= kqop->changes_size - 1) {
+ int newsize = kqop->changes_size * 2;
+ struct kevent *newchanges;
+
+ newchanges = mm_realloc(kqop->changes,
+ newsize * sizeof(struct kevent));
+ if (newchanges == NULL) {
+ event_warn("%s: realloc", __func__);
+ return (-1);
+ }
+ kqop->changes = newchanges;
+ kqop->changes_size = newsize;
+ }
+ if (in_ch->read_change) {
+ out_ch = &kqop->changes[n_changes++];
+ kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
+ in_ch->read_change);
+ }
+ if (in_ch->write_change) {
+ out_ch = &kqop->changes[n_changes++];
+ kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
+ in_ch->write_change);
+ }
+ }
+ return n_changes;
+}
+
+static int
+kq_grow_events(struct kqop *kqop, size_t new_size)
+{
+ struct kevent *newresult;
+
+ newresult = mm_realloc(kqop->events,
+ new_size * sizeof(struct kevent));
+
+ if (newresult) {
+ kqop->events = newresult;
+ kqop->events_size = new_size;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static int
+kq_dispatch(struct event_base *base, struct timeval *tv)
+{
+ struct kqop *kqop = base->evbase;
+ struct kevent *events = kqop->events;
+ struct kevent *changes;
+ struct timespec ts, *ts_p = NULL;
+ int i, n_changes, res;
+
+ if (tv != NULL) {
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec * 1000;
+ ts_p = &ts;
+ }
+
+ /* Build "changes" from "base->changes" */
+ EVUTIL_ASSERT(kqop->changes);
+ n_changes = kq_build_changes_list(&base->changelist, kqop);
+ if (n_changes < 0)
+ return -1;
+
+ event_changelist_remove_all_(&base->changelist, base);
+
+ /* steal the changes array in case some broken code tries to call
+ * dispatch twice at once. */
+ changes = kqop->changes;
+ kqop->changes = NULL;
+
+ /* Make sure that 'events' is at least as long as the list of changes:
+ * otherwise errors in the changes can get reported as a -1 return
+ * value from kevent() rather than as EV_ERROR events in the events
+ * array.
+ *
+ * (We could instead handle -1 return values from kevent() by
+ * retrying with a smaller changes array or a larger events array,
+ * but this approach seems less risky for now.)
+ */
+ if (kqop->events_size < n_changes) {
+ int new_size = kqop->events_size;
+ do {
+ new_size *= 2;
+ } while (new_size < n_changes);
+
+ kq_grow_events(kqop, new_size);
+ events = kqop->events;
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = kevent(kqop->kq, changes, n_changes,
+ events, kqop->events_size, ts_p);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ EVUTIL_ASSERT(kqop->changes == NULL);
+ kqop->changes = changes;
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("kevent");
+ return (-1);
+ }
+
+ return (0);
+ }
+
+ event_debug(("%s: kevent reports %d", __func__, res));
+
+ for (i = 0; i < res; i++) {
+ int which = 0;
+
+ if (events[i].flags & EV_ERROR) {
+ switch (events[i].data) {
+
+ /* Can occur on delete if we are not currently
+ * watching any events on this fd. That can
+ * happen when the fd was closed and another
+ * file was opened with that fd. */
+ case ENOENT:
+ /* Can occur for reasons not fully understood
+ * on FreeBSD. */
+ case EINVAL:
+ continue;
+#if defined(__FreeBSD__)
+ /*
+ * This currently occurs if an FD is closed
+ * before the EV_DELETE makes it out via kevent().
+ * The FreeBSD capabilities code sees the blank
+ * capability set and rejects the request to
+ * modify an event.
+ *
+ * To be strictly correct - when an FD is closed,
+ * all the registered events are also removed.
+ * Queuing EV_DELETE to a closed FD is wrong.
+ * The event(s) should just be deleted from
+ * the pending changelist.
+ */
+ case ENOTCAPABLE:
+ continue;
+#endif
+
+ /* Can occur on a delete if the fd is closed. */
+ case EBADF:
+ /* XXXX On NetBSD, we can also get EBADF if we
+ * try to add the write side of a pipe, but
+ * the read side has already been closed.
+ * Other BSDs call this situation 'EPIPE'. It
+ * would be good if we had a way to report
+ * this situation. */
+ continue;
+ /* These two can occur on an add if the fd was one side
+ * of a pipe, and the other side was closed. */
+ case EPERM:
+ case EPIPE:
+ /* Report read events, if we're listening for
+ * them, so that the user can learn about any
+ * add errors. (If the operation was a
+ * delete, then udata should be cleared.) */
+ if (events[i].udata) {
+ /* The operation was an add:
+ * report the error as a read. */
+ which |= EV_READ;
+ break;
+ } else {
+ /* The operation was a del:
+ * report nothing. */
+ continue;
+ }
+
+ /* Other errors shouldn't occur. */
+ default:
+ errno = events[i].data;
+ return (-1);
+ }
+ } else if (events[i].filter == EVFILT_READ) {
+ which |= EV_READ;
+ } else if (events[i].filter == EVFILT_WRITE) {
+ which |= EV_WRITE;
+ } else if (events[i].filter == EVFILT_SIGNAL) {
+ which |= EV_SIGNAL;
+#ifdef EVFILT_USER
+ } else if (events[i].filter == EVFILT_USER) {
+ base->is_notify_pending = 0;
+#endif
+ }
+
+ if (!which)
+ continue;
+
+ if (events[i].filter == EVFILT_SIGNAL) {
+ evmap_signal_active_(base, events[i].ident, 1);
+ } else {
+ evmap_io_active_(base, events[i].ident, which | EV_ET);
+ }
+ }
+
+ if (res == kqop->events_size) {
+ /* We used all the events space that we have. Maybe we should
+ make it bigger. */
+ kq_grow_events(kqop, kqop->events_size * 2);
+ }
+
+ return (0);
+}
+
+static void
+kqop_free(struct kqop *kqop)
+{
+ if (kqop->changes)
+ mm_free(kqop->changes);
+ if (kqop->events)
+ mm_free(kqop->events);
+ if (kqop->kq >= 0 && kqop->pid == getpid())
+ close(kqop->kq);
+ memset(kqop, 0, sizeof(struct kqop));
+ mm_free(kqop);
+}
+
+static void
+kq_dealloc(struct event_base *base)
+{
+ struct kqop *kqop = base->evbase;
+ evsig_dealloc_(base);
+ kqop_free(kqop);
+}
+
+/* signal handling */
+static int
+kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
+{
+ struct kqop *kqop = base->evbase;
+ struct kevent kev;
+ struct timespec timeout = { 0, 0 };
+ (void)p;
+
+ EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
+
+ memset(&kev, 0, sizeof(kev));
+ kev.ident = nsignal;
+ kev.filter = EVFILT_SIGNAL;
+ kev.flags = EV_ADD;
+
+ /* Be ready for the signal if it is sent any
+ * time between now and the next call to
+ * kq_dispatch. */
+ if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+ return (-1);
+
+ /* We can set the handler for most signals to SIG_IGN and
+ * still have them reported to us in the queue. However,
+ * if the handler for SIGCHLD is SIG_IGN, the system reaps
+ * zombie processes for us, and we don't get any notification.
+ * This appears to be the only signal with this quirk. */
+ if (evsig_set_handler_(base, nsignal,
+ nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
+ return (-1);
+
+ return (0);
+}
+
+static int
+kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
+{
+ struct kqop *kqop = base->evbase;
+ struct kevent kev;
+
+ struct timespec timeout = { 0, 0 };
+ (void)p;
+
+ EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
+
+ memset(&kev, 0, sizeof(kev));
+ kev.ident = nsignal;
+ kev.filter = EVFILT_SIGNAL;
+ kev.flags = EV_DELETE;
+
+ /* Because we insert signal events
+ * immediately, we need to delete them
+ * immediately, too */
+ if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+ return (-1);
+
+ if (evsig_restore_handler_(base, nsignal) == -1)
+ return (-1);
+
+ return (0);
+}
+
+
+/* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use
+ * to wake up the event loop from another thread. */
+
+/* Magic number we use for our filter ID. */
+#define NOTIFY_IDENT 42
+
+int
+event_kq_add_notify_event_(struct event_base *base)
+{
+ struct kqop *kqop = base->evbase;
+#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
+ struct kevent kev;
+ struct timespec timeout = { 0, 0 };
+#endif
+
+ if (kqop->notify_event_added)
+ return 0;
+
+#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
+ memset(&kev, 0, sizeof(kev));
+ kev.ident = NOTIFY_IDENT;
+ kev.filter = EVFILT_USER;
+ kev.flags = EV_ADD | EV_CLEAR;
+
+ if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
+ event_warn("kevent: adding EVFILT_USER event");
+ return -1;
+ }
+
+ kqop->notify_event_added = 1;
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+int
+event_kq_notify_base_(struct event_base *base)
+{
+ struct kqop *kqop = base->evbase;
+#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
+ struct kevent kev;
+ struct timespec timeout = { 0, 0 };
+#endif
+ if (! kqop->notify_event_added)
+ return -1;
+
+#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
+ memset(&kev, 0, sizeof(kev));
+ kev.ident = NOTIFY_IDENT;
+ kev.filter = EVFILT_USER;
+ kev.fflags = NOTE_TRIGGER;
+
+ if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
+ event_warn("kevent: triggering EVFILT_USER event");
+ return -1;
+ }
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+#endif /* EVENT__HAVE_KQUEUE */
diff --git a/libs/libevent/docs/libevent.pc.in b/libs/libevent/docs/libevent.pc.in
new file mode 100644
index 0000000000..7030884eeb
--- /dev/null
+++ b/libs/libevent/docs/libevent.pc.in
@@ -0,0 +1,16 @@
+#libevent pkg-config source file
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libevent
+Description: libevent is an asynchronous notification event loop library
+Version: @VERSION@
+Requires:
+Conflicts:
+Libs: -L${libdir} -levent
+Libs.private: @LIBS@
+Cflags: -I${includedir}
+
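+# Illustrative use (assumes the generated libevent.pc is installed where
+# pkg-config can find it; the exact flags come from the substitutions above):
+#   cc `pkg-config --cflags libevent` -o app app.c `pkg-config --libs libevent`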
diff --git a/libs/libevent/docs/libevent_openssl.pc.in b/libs/libevent/docs/libevent_openssl.pc.in
new file mode 100644
index 0000000000..a65d1e0668
--- /dev/null
+++ b/libs/libevent/docs/libevent_openssl.pc.in
@@ -0,0 +1,16 @@
+#libevent pkg-config source file
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libevent_openssl
+Description: libevent_openssl adds openssl-based TLS support to libevent
+Version: @VERSION@
+Requires: libevent
+Conflicts:
+Libs: -L${libdir} -levent_openssl
+Libs.private: @LIBS@ @OPENSSL_LIBS@
+Cflags: -I${includedir} @OPENSSL_INCS@
+
diff --git a/libs/libevent/docs/libevent_pthreads.pc.in b/libs/libevent/docs/libevent_pthreads.pc.in
new file mode 100644
index 0000000000..9bc2392b34
--- /dev/null
+++ b/libs/libevent/docs/libevent_pthreads.pc.in
@@ -0,0 +1,16 @@
+#libevent pkg-config source file
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libevent_pthreads
+Description: libevent_pthreads adds pthreads-based threading support to libevent
+Version: @VERSION@
+Requires: libevent
+Conflicts:
+Libs: -L${libdir} -levent_pthreads
+Libs.private: @LIBS@ @PTHREAD_LIBS@
+Cflags: -I${includedir} @PTHREAD_CFLAGS@
+
diff --git a/libs/libevent/docs/m4/ac_backport_259_ssizet.m4 b/libs/libevent/docs/m4/ac_backport_259_ssizet.m4
new file mode 100644
index 0000000000..75fde386cb
--- /dev/null
+++ b/libs/libevent/docs/m4/ac_backport_259_ssizet.m4
@@ -0,0 +1,3 @@
+AN_IDENTIFIER([ssize_t], [AC_TYPE_SSIZE_T])
+AC_DEFUN([AC_TYPE_SSIZE_T], [AC_CHECK_TYPE(ssize_t, int)])
+
diff --git a/libs/libevent/docs/m4/acx_pthread.m4 b/libs/libevent/docs/m4/acx_pthread.m4
new file mode 100644
index 0000000000..d2b116945f
--- /dev/null
+++ b/libs/libevent/docs/m4/acx_pthread.m4
@@ -0,0 +1,279 @@
+##### http://autoconf-archive.cryp.to/acx_pthread.html
+#
+# SYNOPSIS
+#
+# ACX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
+#
+# DESCRIPTION
+#
+# This macro figures out how to build C programs using POSIX threads.
+# It sets the PTHREAD_LIBS output variable to the threads library and
+# linker flags, and the PTHREAD_CFLAGS output variable to any special
+# C compiler flags that are needed. (The user can also force certain
+# compiler flags/libs to be tested by setting these environment
+# variables.)
+#
+# Also sets PTHREAD_CC to any special C compiler that is needed for
+# multi-threaded programs (defaults to the value of CC otherwise).
+# (This is necessary on AIX to use the special cc_r compiler alias.)
+#
+# NOTE: You are assumed to not only compile your program with these
+# flags, but also link it with them as well. e.g. you should link
+# with $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS
+# $LIBS
+#
+# If you are only building threads programs, you may wish to use
+# these variables in your default LIBS, CFLAGS, and CC:
+#
+# LIBS="$PTHREAD_LIBS $LIBS"
+# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+# CC="$PTHREAD_CC"
+#
+# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute
+# constant has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to
+# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+#
+# ACTION-IF-FOUND is a list of shell commands to run if a threads
+# library is found, and ACTION-IF-NOT-FOUND is a list of commands to
+# run if it is not found. If ACTION-IF-FOUND is not specified, the
+# default action will define HAVE_PTHREAD.
+#
+# Please let the authors know if this macro fails on any platform, or
+# if you have any other suggestions or comments. This macro was based
+# on work by SGJ on autoconf scripts for FFTW (http://www.fftw.org/)
+# (with help from M. Frigo), as well as ac_pthread and hb_pthread
+# macros posted by Alejandro Forero Cuervo to the autoconf macro
+# repository. We are also grateful for the helpful feedback of
+# numerous users.
+#
+# LAST MODIFICATION
+#
+# 2007-07-29
+#
+# COPYLEFT
+#
+# Copyright (c) 2007 Steven G. Johnson <stevenj@alum.mit.edu>
+#
+# This program is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright
+# owner gives unlimited permission to copy, distribute and modify the
+# configure scripts that are the output of Autoconf when processing
+# the Macro. You need not follow the terms of the GNU General Public
+# License when using or distributing such scripts, even though
+# portions of the text of the Macro appear in them. The GNU General
+# Public License (GPL) does govern all other use of the material that
+# constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the
+# Autoconf Macro released by the Autoconf Macro Archive. When you
+# make and distribute a modified version of the Autoconf Macro, you
+# may extend this special exception to the GPL to apply to your
+# modified version as well.
+
+AC_DEFUN([ACX_PTHREAD], [
+AC_REQUIRE([AC_CANONICAL_HOST])
+AC_LANG_SAVE
+AC_LANG_C
+acx_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS])
+ AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes)
+ AC_MSG_RESULT($acx_pthread_ok)
+ if test x"$acx_pthread_ok" = xno; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads too;
+# also defines -D_REENTRANT)
+# ... -mt is also the pthreads flag for HP/aCC
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case "${host_cpu}-${host_os}" in
+ *solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (We need to link with -pthreads/-mt/
+ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
+ # a function called by this macro, so we could check for that, but
+ # who knows whether they'll stub that too in a future libc.) So,
+ # we'll just look for -pthreads and -lpthread first:
+
+ acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags"
+ ;;
+esac
+
+if test x"$acx_pthread_ok" = xno; then
+for flag in $acx_pthread_flags; do
+
+ case $flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $flag])
+ PTHREAD_CFLAGS="$flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no)
+ if test x"$acx_pthread_config" = xno; then continue; fi
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$flag])
+ PTHREAD_LIBS="-l$flag"
+ ;;
+ esac
+
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+ AC_TRY_LINK([#include <pthread.h>],
+ [pthread_t th; pthread_join(th, 0);
+ pthread_attr_init(0); pthread_cleanup_push(0, 0);
+ pthread_create(0,0,0,0); pthread_cleanup_pop(0); ],
+ [acx_pthread_ok=yes])
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ AC_MSG_RESULT($acx_pthread_ok)
+ if test "x$acx_pthread_ok" = xyes; then
+ break;
+ fi
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$acx_pthread_ok" = xyes; then
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_MSG_CHECKING([for joinable pthread attribute])
+ attr_name=unknown
+ for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_TRY_LINK([#include <pthread.h>], [int attr=$attr; return attr;],
+ [attr_name=$attr; break])
+ done
+ AC_MSG_RESULT($attr_name)
+ if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+ AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name,
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ fi
+
+ AC_MSG_CHECKING([if more special flags are required for pthreads])
+ flag=no
+ case "${host_cpu}-${host_os}" in
+ *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";;
+ *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";;
+ esac
+ AC_MSG_RESULT(${flag})
+ if test "x$flag" != xno; then
+ PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+ fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ # More AIX lossage: must compile with xlc_r or cc_r
+ if test x"$GCC" != xyes; then
+ AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC})
+ else
+ PTHREAD_CC=$CC
+ fi
+else
+ PTHREAD_CC="$CC"
+fi
+
+AC_SUBST(PTHREAD_LIBS)
+AC_SUBST(PTHREAD_CFLAGS)
+AC_SUBST(PTHREAD_CC)
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
+ ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1])
+ :
+else
+ acx_pthread_ok=no
+ $2
+fi
+AC_LANG_RESTORE
+])dnl ACX_PTHREAD
diff --git a/libs/libevent/docs/m4/libevent_openssl.m4 b/libs/libevent/docs/m4/libevent_openssl.m4
new file mode 100644
index 0000000000..3cb064a0bd
--- /dev/null
+++ b/libs/libevent/docs/m4/libevent_openssl.m4
@@ -0,0 +1,47 @@
+dnl ######################################################################
+dnl OpenSSL support
+AC_DEFUN([LIBEVENT_OPENSSL], [
+AC_REQUIRE([NTP_PKG_CONFIG])dnl
+
+case "$enable_openssl" in
+ yes)
+ have_openssl=no
+ case "$PKG_CONFIG" in
+ '')
+ ;;
+ *)
+ OPENSSL_LIBS=`$PKG_CONFIG --libs openssl 2>/dev/null`
+ case "$OPENSSL_LIBS" in
+ '') ;;
+ *) OPENSSL_LIBS="$OPENSSL_LIBS $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD"
+ have_openssl=yes
+ ;;
+ esac
+ OPENSSL_INCS=`$PKG_CONFIG --cflags openssl 2>/dev/null`
+ ;;
+ esac
+ case "$have_openssl" in
+ yes) ;;
+ *)
+ save_LIBS="$LIBS"
+ LIBS=""
+ OPENSSL_LIBS=""
+ AC_SEARCH_LIBS([SSL_new], [ssl],
+ [have_openssl=yes
+ OPENSSL_LIBS="$LIBS -lcrypto $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD"],
+ [have_openssl=no],
+ [-lcrypto $EV_LIB_GDI $EV_LIB_WS32 $OPENSSL_LIBADD])
+ LIBS="$save_LIBS"
+ ;;
+ esac
+ AC_SUBST(OPENSSL_INCS)
+ AC_SUBST(OPENSSL_LIBS)
+ case "$have_openssl" in
+ yes) AC_DEFINE(HAVE_OPENSSL, 1, [Define if the system has openssl]) ;;
+ esac
+ ;;
+esac
+
+# check if we have and should use openssl
+AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])
+])
diff --git a/libs/libevent/docs/m4/ntp_pkg_config.m4 b/libs/libevent/docs/m4/ntp_pkg_config.m4
new file mode 100644
index 0000000000..1bce8a6e4d
--- /dev/null
+++ b/libs/libevent/docs/m4/ntp_pkg_config.m4
@@ -0,0 +1,27 @@
+dnl NTP_PKG_CONFIG -*- Autoconf -*-
+dnl
+dnl Look for pkg-config, which must be at least
+dnl $ntp_pkgconfig_min_version.
+dnl
+AC_DEFUN([NTP_PKG_CONFIG], [
+
+dnl lower the minimum version if you find an earlier one works
+ntp_pkgconfig_min_version='0.15.0'
+AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+AS_UNSET([ac_cv_path_PKG_CONFIG])
+AS_UNSET([ac_cv_path_ac_pt_PKG_CONFIG])
+
+case "$PKG_CONFIG" in
+ /*)
+ AC_MSG_CHECKING([if pkg-config is at least version $ntp_pkgconfig_min_version])
+ if $PKG_CONFIG --atleast-pkgconfig-version $ntp_pkgconfig_min_version; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ PKG_CONFIG=""
+ fi
+ ;;
+esac
+
+]) dnl NTP_PKG_CONFIG
+
diff --git a/libs/libevent/docs/make-event-config.sed b/libs/libevent/docs/make-event-config.sed
new file mode 100644
index 0000000000..e31018a2dd
--- /dev/null
+++ b/libs/libevent/docs/make-event-config.sed
@@ -0,0 +1,23 @@
+# Sed script to postprocess config.h into event-config.h.
+
+1i\
+/* event2/event-config.h\
+ *\
+ * This file was generated by autoconf when libevent was built, and post-\
+ * processed by Libevent so that its macros would have a uniform prefix.\
+ *\
+ * DO NOT EDIT THIS FILE.\
+ *\
+ * Do not rely on macros in this file existing in later versions.\
+ */\
+\
+#ifndef EVENT2_EVENT_CONFIG_H_INCLUDED_\
+#define EVENT2_EVENT_CONFIG_H_INCLUDED_\
+
+$a\
+\
+#endif /* event2/event-config.h */
+
+s/#\( *\)define /#\1define EVENT__/
+s/#\( *\)undef /#\1undef EVENT__/
+s/#\( *\)if\(n*\)def /#\1if\2def EVENT__/
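+
+# For illustration (not part of the transformation), a config.h line such as
+#   #define HAVE_EPOLL 1
+# comes out of this script as
+#   #define EVENT__HAVE_EPOLL 1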
diff --git a/libs/libevent/docs/make_epoll_table.py b/libs/libevent/docs/make_epoll_table.py
new file mode 100644
index 0000000000..1b15a91a67
--- /dev/null
+++ b/libs/libevent/docs/make_epoll_table.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python2
+
+def get(old,wc,rc,cc):
+ if ('xxx' in (rc, wc, cc)):
+ return "0",255
+
+ if ('add' in (rc, wc, cc)):
+ events = []
+ if rc == 'add' or (rc != 'del' and 'r' in old):
+ events.append("EPOLLIN")
+ if wc == 'add' or (wc != 'del' and 'w' in old):
+ events.append("EPOLLOUT")
+ if cc == 'add' or (cc != 'del' and 'c' in old):
+ events.append("EPOLLRDHUP")
+
+ if old == "0":
+ op = "EPOLL_CTL_ADD"
+ else:
+ op = "EPOLL_CTL_MOD"
+ return "|".join(events), op
+
+ if ('del' in (rc, wc, cc)):
+ delevents = []
+ modevents = []
+ op = "EPOLL_CTL_DEL"
+
+ if 'r' in old:
+ modevents.append("EPOLLIN")
+ if 'w' in old:
+ modevents.append("EPOLLOUT")
+ if 'c' in old:
+ modevents.append("EPOLLRDHUP")
+
+ for item, event in [(rc,"EPOLLIN"),
+ (wc,"EPOLLOUT"),
+ (cc,"EPOLLRDHUP")]:
+ if item == 'del':
+ delevents.append(event)
+ if event in modevents:
+ modevents.remove(event)
+
+ if modevents:
+ return "|".join(modevents), "EPOLL_CTL_MOD"
+ else:
+ return "|".join(delevents), "EPOLL_CTL_DEL"
+
+ return 0, 0
+
+
+def fmt(op, ev, old, wc, rc, cc):
+ entry = "{ %s, %s },"%(op, ev)
+ print "\t/* old=%3s, write:%3s, read:%3s, close:%3s */\n\t%s" % (
+ old, wc, rc, cc, entry)
+ return len(entry)
+
+for old in ('0','r','w','rw','c','cr','cw','crw'):
+ for wc in ('0', 'add', 'del', 'xxx'):
+ for rc in ('0', 'add', 'del', 'xxx'):
+ for cc in ('0', 'add', 'del', 'xxx'):
+
+ op,ev = get(old,wc,rc,cc)
+
+ fmt(op, ev, old, wc, rc, cc)
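+
+# For illustration (hand-worked from get() and fmt() above), the entry
+# emitted for old='r', write='0', read='add', close='0' is:
+#   /* old=  r, write:  0, read:add, close:  0 */
+#   { EPOLLIN, EPOLL_CTL_MOD },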
diff --git a/libs/libevent/docs/poll.c b/libs/libevent/docs/poll.c
new file mode 100644
index 0000000000..51475934b3
--- /dev/null
+++ b/libs/libevent/docs/poll.c
@@ -0,0 +1,341 @@
+/* $OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_POLL
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <poll.h>
+#include <signal.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "event-internal.h"
+#include "evsignal-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "time-internal.h"
+
+struct pollidx {
+ int idxplus1;
+};
+
+struct pollop {
+ int event_count; /* Number of pollfd slots allocated */
+ int nfds; /* Number of pollfd slots in use */
+ int realloc_copy; /* True iff we must realloc
+ * event_set_copy */
+ struct pollfd *event_set;
+ struct pollfd *event_set_copy;
+};
+
+static void *poll_init(struct event_base *);
+static int poll_add(struct event_base *, int, short old, short events, void *idx);
+static int poll_del(struct event_base *, int, short old, short events, void *idx);
+static int poll_dispatch(struct event_base *, struct timeval *);
+static void poll_dealloc(struct event_base *);
+
+const struct eventop pollops = {
+ "poll",
+ poll_init,
+ poll_add,
+ poll_del,
+ poll_dispatch,
+ poll_dealloc,
+ 0, /* doesn't need_reinit */
+ EV_FEATURE_FDS,
+ sizeof(struct pollidx),
+};
+
+static void *
+poll_init(struct event_base *base)
+{
+ struct pollop *pollop;
+
+ if (!(pollop = mm_calloc(1, sizeof(struct pollop))))
+ return (NULL);
+
+ evsig_init_(base);
+
+ evutil_weakrand_seed_(&base->weakrand_seed, 0);
+
+ return (pollop);
+}
+
+#ifdef CHECK_INVARIANTS
+static void
+poll_check_ok(struct pollop *pop)
+{
+ int i, idx;
+ struct event *ev;
+
+ for (i = 0; i < pop->fd_count; ++i) {
+ idx = pop->idxplus1_by_fd[i]-1;
+ if (idx < 0)
+ continue;
+ EVUTIL_ASSERT(pop->event_set[idx].fd == i);
+ }
+ for (i = 0; i < pop->nfds; ++i) {
+ struct pollfd *pfd = &pop->event_set[i];
+ EVUTIL_ASSERT(pop->idxplus1_by_fd[pfd->fd] == i+1);
+ }
+}
+#else
+#define poll_check_ok(pop)
+#endif
+
+static int
+poll_dispatch(struct event_base *base, struct timeval *tv)
+{
+ int res, i, j, nfds;
+ long msec = -1;
+ struct pollop *pop = base->evbase;
+ struct pollfd *event_set;
+
+ poll_check_ok(pop);
+
+ nfds = pop->nfds;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->th_base_lock) {
+ /* If we're using this backend in a multithreaded setting,
+ * then we need to work on a copy of event_set, so that we can
+ * let other threads modify the main event_set while we're
+ * polling. If we're not multithreaded, then we'll skip the
+ * copy step here to save memory and time. */
+ if (pop->realloc_copy) {
+ struct pollfd *tmp = mm_realloc(pop->event_set_copy,
+ pop->event_count * sizeof(struct pollfd));
+ if (tmp == NULL) {
+ event_warn("realloc");
+ return -1;
+ }
+ pop->event_set_copy = tmp;
+ pop->realloc_copy = 0;
+ }
+ memcpy(pop->event_set_copy, pop->event_set,
+ sizeof(struct pollfd)*nfds);
+ event_set = pop->event_set_copy;
+ } else {
+ event_set = pop->event_set;
+ }
+#else
+ event_set = pop->event_set;
+#endif
+
+ if (tv != NULL) {
+ msec = evutil_tv_to_msec_(tv);
+ if (msec < 0 || msec > INT_MAX)
+ msec = INT_MAX;
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = poll(event_set, nfds, msec);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("poll");
+ return (-1);
+ }
+
+ return (0);
+ }
+
+ event_debug(("%s: poll reports %d", __func__, res));
+
+ if (res == 0 || nfds == 0)
+ return (0);
+
+ i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
+ for (j = 0; j < nfds; j++) {
+ int what;
+ if (++i == nfds)
+ i = 0;
+ what = event_set[i].revents;
+ if (!what)
+ continue;
+
+ res = 0;
+
+ /* On error or hangup, report the fd as both readable and writable */
+ if (what & (POLLHUP|POLLERR))
+ what |= POLLIN|POLLOUT;
+ if (what & POLLIN)
+ res |= EV_READ;
+ if (what & POLLOUT)
+ res |= EV_WRITE;
+ if (res == 0)
+ continue;
+
+ evmap_io_active_(base, event_set[i].fd, res);
+ }
+
+ return (0);
+}
+
+static int
+poll_add(struct event_base *base, int fd, short old, short events, void *idx_)
+{
+ struct pollop *pop = base->evbase;
+ struct pollfd *pfd = NULL;
+ struct pollidx *idx = idx_;
+ int i;
+
+ EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
+ if (!(events & (EV_READ|EV_WRITE)))
+ return (0);
+
+ poll_check_ok(pop);
+ if (pop->nfds + 1 >= pop->event_count) {
+ struct pollfd *tmp_event_set;
+ int tmp_event_count;
+
+ if (pop->event_count < 32)
+ tmp_event_count = 32;
+ else
+ tmp_event_count = pop->event_count * 2;
+
+ /* We need more file descriptors */
+ tmp_event_set = mm_realloc(pop->event_set,
+ tmp_event_count * sizeof(struct pollfd));
+ if (tmp_event_set == NULL) {
+ event_warn("realloc");
+ return (-1);
+ }
+ pop->event_set = tmp_event_set;
+
+ pop->event_count = tmp_event_count;
+ pop->realloc_copy = 1;
+ }
+
+ i = idx->idxplus1 - 1;
+
+ if (i >= 0) {
+ pfd = &pop->event_set[i];
+ } else {
+ i = pop->nfds++;
+ pfd = &pop->event_set[i];
+ pfd->events = 0;
+ pfd->fd = fd;
+ idx->idxplus1 = i + 1;
+ }
+
+ pfd->revents = 0;
+ if (events & EV_WRITE)
+ pfd->events |= POLLOUT;
+ if (events & EV_READ)
+ pfd->events |= POLLIN;
+ poll_check_ok(pop);
+
+ return (0);
+}
+
+/*
+ * Stop polling the given fd for the requested events; if no events remain
+ * for it, drop its pollfd entry.
+ */
+
+static int
+poll_del(struct event_base *base, int fd, short old, short events, void *idx_)
+{
+ struct pollop *pop = base->evbase;
+ struct pollfd *pfd = NULL;
+ struct pollidx *idx = idx_;
+ int i;
+
+ EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
+ if (!(events & (EV_READ|EV_WRITE)))
+ return (0);
+
+ poll_check_ok(pop);
+ i = idx->idxplus1 - 1;
+ if (i < 0)
+ return (-1);
+
+ /* Do we still want to read or write? */
+ pfd = &pop->event_set[i];
+ if (events & EV_READ)
+ pfd->events &= ~POLLIN;
+ if (events & EV_WRITE)
+ pfd->events &= ~POLLOUT;
+ poll_check_ok(pop);
+ if (pfd->events)
+ /* Another event cares about that fd. */
+ return (0);
+
+ /* Okay, so we aren't interested in that fd anymore. */
+ idx->idxplus1 = 0;
+
+ --pop->nfds;
+ if (i != pop->nfds) {
+ /*
+ * Shift the last pollfd down into the now-unoccupied
+ * position.
+ */
+ memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
+ sizeof(struct pollfd));
+ idx = evmap_io_get_fdinfo_(&base->io, pop->event_set[i].fd);
+ EVUTIL_ASSERT(idx);
+ EVUTIL_ASSERT(idx->idxplus1 == pop->nfds + 1);
+ idx->idxplus1 = i + 1;
+ }
+
+ poll_check_ok(pop);
+ return (0);
+}
+
+static void
+poll_dealloc(struct event_base *base)
+{
+ struct pollop *pop = base->evbase;
+
+ evsig_dealloc_(base);
+ if (pop->event_set)
+ mm_free(pop->event_set);
+ if (pop->event_set_copy)
+ mm_free(pop->event_set_copy);
+
+ memset(pop, 0, sizeof(struct pollop));
+ mm_free(pop);
+}
+
+#endif /* EVENT__HAVE_POLL */
diff --git a/libs/libevent/docs/sample/dns-example.c b/libs/libevent/docs/sample/dns-example.c
new file mode 100644
index 0000000000..fb705664aa
--- /dev/null
+++ b/libs/libevent/docs/sample/dns-example.c
@@ -0,0 +1,257 @@
+/*
+ This example code shows how to use the high-level, low-level, and
+ server-level interfaces of evdns.
+
+ XXX It's pretty ugly and should probably be cleaned up.
+ */
+
+#include <event2/event-config.h>
+
+/* Compatibility for possible missing IPv6 declarations */
+#include "../ipv6-internal.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <getopt.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#include <event2/event.h>
+#include <event2/dns.h>
+#include <event2/dns_struct.h>
+#include <event2/util.h>
+
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define u32 ev_uint32_t
+#define u8 ev_uint8_t
+
+static const char *
+debug_ntoa(u32 address)
+{
+ static char buf[32];
+ u32 a = ntohl(address);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ (int)(u8)((a>>24)&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a )&0xff));
+ return buf;
+}
+
+static void
+main_callback(int result, char type, int count, int ttl,
+ void *addrs, void *orig) {
+ char *n = (char*)orig;
+ int i;
+ for (i = 0; i < count; ++i) {
+ if (type == DNS_IPv4_A) {
+ printf("%s: %s\n", n, debug_ntoa(((u32*)addrs)[i]));
+ } else if (type == DNS_PTR) {
+ printf("%s: %s\n", n, ((char**)addrs)[i]);
+ }
+ }
+ if (!count) {
+ printf("%s: No answer (%d)\n", n, result);
+ }
+ fflush(stdout);
+}
+
+static void
+gai_callback(int err, struct evutil_addrinfo *ai, void *arg)
+{
+ const char *name = arg;
+ int i;
+ if (err) {
+ printf("%s: %s\n", name, evutil_gai_strerror(err));
+ }
+ if (ai && ai->ai_canonname)
+ printf(" %s ==> %s\n", name, ai->ai_canonname);
+ for (i=0; ai; ai = ai->ai_next, ++i) {
+ char buf[128];
+ if (ai->ai_family == PF_INET) {
+ struct sockaddr_in *sin =
+ (struct sockaddr_in*)ai->ai_addr;
+ evutil_inet_ntop(AF_INET, &sin->sin_addr, buf,
+ sizeof(buf));
+ printf("[%d] %s: %s\n",i,name,buf);
+ } else {
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6*)ai->ai_addr;
+ evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, buf,
+ sizeof(buf));
+ printf("[%d] %s: %s\n",i,name,buf);
+ }
+ }
+}
+
+static void
+evdns_server_callback(struct evdns_server_request *req, void *data)
+{
+ int i, r;
+ (void)data;
+ /* dummy; give 192.168.11.11 as an answer for all A questions,
+ * give foo.bar.example.com as an answer for all PTR questions. */
+ for (i = 0; i < req->nquestions; ++i) {
+ u32 ans = htonl(0xc0a80b0bUL);
+ if (req->questions[i]->type == EVDNS_TYPE_A &&
+ req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+ printf(" -- replying for %s (A)\n", req->questions[i]->name);
+ r = evdns_server_request_add_a_reply(req, req->questions[i]->name,
+ 1, &ans, 10);
+ if (r<0)
+ printf("eeep, didn't work.\n");
+ } else if (req->questions[i]->type == EVDNS_TYPE_PTR &&
+ req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+ printf(" -- replying for %s (PTR)\n", req->questions[i]->name);
+ r = evdns_server_request_add_ptr_reply(req, NULL, req->questions[i]->name,
+ "foo.bar.example.com", 10);
+ if (r<0)
+ printf("ugh, no luck");
+ } else {
+ printf(" -- skipping %s [%d %d]\n", req->questions[i]->name,
+ req->questions[i]->type, req->questions[i]->dns_question_class);
+ }
+ }
+
+ r = evdns_server_request_respond(req, 0);
+ if (r<0)
+ printf("eeek, couldn't send reply.\n");
+}
+
+static int verbose = 0;
+
+static void
+logfn(int is_warn, const char *msg) {
+ if (!is_warn && !verbose)
+ return;
+ fprintf(stderr, "%s: %s\n", is_warn?"WARN":"INFO", msg);
+}
+
+int
+main(int c, char **v) {
+ struct options {
+ int reverse;
+ int use_getaddrinfo;
+ int servertest;
+ const char *resolv_conf;
+ const char *ns;
+ };
+ struct options o;
+ int opt;
+ struct event_base *event_base = NULL;
+ struct evdns_base *evdns_base = NULL;
+
+ memset(&o, 0, sizeof(o));
+
+ if (c < 2) {
+ fprintf(stderr, "syntax: %s [-x] [-v] [-c resolv.conf] [-s ns] hostname\n", v[0]);
+ fprintf(stderr, "syntax: %s [-T]\n", v[0]);
+ return 1;
+ }
+
+ while ((opt = getopt(c, v, "xvgc:Ts:")) != -1) {
+ switch (opt) {
+ case 'x': o.reverse = 1; break;
+ case 'v': ++verbose; break;
+ case 'g': o.use_getaddrinfo = 1; break;
+ case 'T': o.servertest = 1; break;
+ case 'c': o.resolv_conf = optarg; break;
+ case 's': o.ns = optarg; break;
+ default : fprintf(stderr, "Unknown option %c\n", opt); break;
+ }
+ }
+
+#ifdef _WIN32
+ {
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+ }
+#endif
+
+ event_base = event_base_new();
+ evdns_base = evdns_base_new(event_base, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+ evdns_set_log_fn(logfn);
+
+ if (o.servertest) {
+ evutil_socket_t sock;
+ struct sockaddr_in my_addr;
+ sock = socket(PF_INET, SOCK_DGRAM, 0);
+ if (sock == -1) {
+ perror("socket");
+ exit(1);
+ }
+ evutil_make_socket_nonblocking(sock);
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_port = htons(10053);
+ my_addr.sin_addr.s_addr = INADDR_ANY;
+ if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr))<0) {
+ perror("bind");
+ exit(1);
+ }
+ evdns_add_server_port_with_base(event_base, sock, 0, evdns_server_callback, NULL);
+ }
+ if (optind < c) {
+ int res;
+#ifdef _WIN32
+ if (o.resolv_conf == NULL && !o.ns)
+ res = evdns_base_config_windows_nameservers(evdns_base);
+ else
+#endif
+ if (o.ns)
+ res = evdns_base_nameserver_ip_add(evdns_base, o.ns);
+ else
+ res = evdns_base_resolv_conf_parse(evdns_base,
+ DNS_OPTION_NAMESERVERS, o.resolv_conf);
+
+ if (res < 0) {
+ fprintf(stderr, "Couldn't configure nameservers");
+ return 1;
+ }
+ }
+
+ printf("EVUTIL_AI_CANONNAME in example = %d\n", EVUTIL_AI_CANONNAME);
+ for (; optind < c; ++optind) {
+ if (o.reverse) {
+ struct in_addr addr;
+ if (evutil_inet_pton(AF_INET, v[optind], &addr)!=1) {
+ fprintf(stderr, "Skipping non-IP %s\n", v[optind]);
+ continue;
+ }
+ fprintf(stderr, "resolving %s...\n",v[optind]);
+ evdns_base_resolve_reverse(evdns_base, &addr, 0, main_callback, v[optind]);
+ } else if (o.use_getaddrinfo) {
+ struct evutil_addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_protocol = IPPROTO_TCP;
+ hints.ai_flags = EVUTIL_AI_CANONNAME;
+ fprintf(stderr, "resolving (fwd) %s...\n",v[optind]);
+ evdns_getaddrinfo(evdns_base, v[optind], NULL, &hints,
+ gai_callback, v[optind]);
+ } else {
+ fprintf(stderr, "resolving (fwd) %s...\n",v[optind]);
+ evdns_base_resolve_ipv4(evdns_base, v[optind], 0, main_callback, v[optind]);
+ }
+ }
+ fflush(stdout);
+ event_base_dispatch(event_base);
+ return 0;
+}
+
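+/*
+ * Example invocations (illustrative; the host name and address are
+ * placeholders). They follow the syntax strings printed by main() above:
+ *
+ *   ./dns-example -v www.example.com     forward (A) lookup, verbose logging
+ *   ./dns-example -x 192.0.2.1           reverse (PTR) lookup
+ *   ./dns-example -T                     run the toy DNS server on port 10053
+ */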
diff --git a/libs/libevent/docs/sample/event-read-fifo.c b/libs/libevent/docs/sample/event-read-fifo.c
new file mode 100644
index 0000000000..27b0b530d5
--- /dev/null
+++ b/libs/libevent/docs/sample/event-read-fifo.c
@@ -0,0 +1,162 @@
+/*
+ * This sample code shows how to use Libevent to read from a named pipe.
+ * XXX This code could make better use of the Libevent interfaces.
+ *
+ * XXX This does not work on Windows; ignore everything inside the _WIN32 block.
+ *
+ * On UNIX, compile with:
+ * cc -I/usr/local/include -o event-read-fifo event-read-fifo.c \
+ * -L/usr/local/lib -levent
+ */
+
+#include <event2/event-config.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifndef _WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <signal.h>
+#else
+#include <winsock2.h>
+#include <windows.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event2/event.h>
+
+static void
+fifo_read(evutil_socket_t fd, short event, void *arg)
+{
+ char buf[255];
+ int len;
+ struct event *ev = arg;
+#ifdef _WIN32
+ DWORD dwBytesRead;
+#endif
+
+ fprintf(stderr, "fifo_read called with fd: %d, event: %d, arg: %p\n",
+ (int)fd, event, arg);
+#ifdef _WIN32
+ len = ReadFile((HANDLE)fd, buf, sizeof(buf) - 1, &dwBytesRead, NULL);
+
+ /* Check for end of file. */
+ if (len && dwBytesRead == 0) {
+ fprintf(stderr, "End Of File");
+ event_del(ev);
+ return;
+ }
+
+ buf[dwBytesRead] = '\0';
+#else
+ len = read(fd, buf, sizeof(buf) - 1);
+
+ if (len <= 0) {
+ if (len == -1)
+ perror("read");
+ else if (len == 0)
+ fprintf(stderr, "Connection closed\n");
+ event_del(ev);
+ event_base_loopbreak(event_get_base(ev));
+ return;
+ }
+
+ buf[len] = '\0';
+#endif
+ fprintf(stdout, "Read: %s\n", buf);
+}
+
+/* On Unix, cleanup event.fifo if SIGINT is received. */
+#ifndef _WIN32
+static void
+signal_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event_base *base = arg;
+ event_base_loopbreak(base);
+}
+#endif
+
+int
+main(int argc, char **argv)
+{
+ struct event *evfifo;
+ struct event_base* base;
+#ifdef _WIN32
+ HANDLE socket;
+ /* Open a file. */
+ socket = CreateFileA("test.txt", /* open File */
+ GENERIC_READ, /* open for reading */
+ 0, /* do not share */
+ NULL, /* no security */
+ OPEN_EXISTING, /* existing file only */
+ FILE_ATTRIBUTE_NORMAL, /* normal file */
+ NULL); /* no attr. template */
+
+ if (socket == INVALID_HANDLE_VALUE)
+ return 1;
+
+#else
+ struct event *signal_int;
+ struct stat st;
+ const char *fifo = "event.fifo";
+ int socket;
+
+ if (lstat(fifo, &st) == 0) {
+ if ((st.st_mode & S_IFMT) == S_IFREG) {
+ errno = EEXIST;
+ perror("lstat");
+ exit(1);
+ }
+ }
+
+ unlink(fifo);
+ if (mkfifo(fifo, 0600) == -1) {
+ perror("mkfifo");
+ exit(1);
+ }
+
+ socket = open(fifo, O_RDONLY | O_NONBLOCK, 0);
+
+ if (socket == -1) {
+ perror("open");
+ exit(1);
+ }
+
+ fprintf(stderr, "Write data to %s\n", fifo);
+#endif
+ /* Initialize the event library */
+ base = event_base_new();
+
+ /* Initialize one event */
+#ifdef _WIN32
+ evfifo = event_new(base, (evutil_socket_t)socket, EV_READ|EV_PERSIST, fifo_read,
+ event_self_cbarg());
+#else
+ /* catch SIGINT so that event.fifo can be cleaned up */
+ signal_int = evsignal_new(base, SIGINT, signal_cb, base);
+ event_add(signal_int, NULL);
+
+ evfifo = event_new(base, socket, EV_READ|EV_PERSIST, fifo_read,
+ event_self_cbarg());
+#endif
+
+ /* Add it to the active events, without a timeout */
+ event_add(evfifo, NULL);
+
+ event_base_dispatch(base);
+ event_base_free(base);
+#ifdef _WIN32
+ CloseHandle(socket);
+#else
+ close(socket);
+ unlink(fifo);
+#endif
+ libevent_global_shutdown();
+ return (0);
+}
+
diff --git a/libs/libevent/docs/sample/hello-world.c b/libs/libevent/docs/sample/hello-world.c
new file mode 100644
index 0000000000..d3cf058a8b
--- /dev/null
+++ b/libs/libevent/docs/sample/hello-world.c
@@ -0,0 +1,141 @@
+/*
+ This example program provides a trivial server that listens for TCP
+ connections on port 9995. When they arrive, it writes a short message to
+ each client connection, and closes each connection once it is flushed.
+
+ Where possible, it exits cleanly in response to a SIGINT (ctrl-c).
+*/
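+
+/*
+ * Usage sketch: run the server and connect to it, for example with netcat:
+ *
+ *   nc 127.0.0.1 9995
+ *
+ * The server writes "Hello, World!" and closes the connection once the
+ * message has been flushed (see conn_writecb() below).
+ */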
+
+
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <signal.h>
+#ifndef _WIN32
+#include <netinet/in.h>
+# ifdef _XOPEN_SOURCE_EXTENDED
+# include <arpa/inet.h>
+# endif
+#include <sys/socket.h>
+#endif
+
+#include <event2/bufferevent.h>
+#include <event2/buffer.h>
+#include <event2/listener.h>
+#include <event2/util.h>
+#include <event2/event.h>
+
+static const char MESSAGE[] = "Hello, World!\n";
+
+static const int PORT = 9995;
+
+static void listener_cb(struct evconnlistener *, evutil_socket_t,
+ struct sockaddr *, int socklen, void *);
+static void conn_writecb(struct bufferevent *, void *);
+static void conn_eventcb(struct bufferevent *, short, void *);
+static void signal_cb(evutil_socket_t, short, void *);
+
+int
+main(int argc, char **argv)
+{
+ struct event_base *base;
+ struct evconnlistener *listener;
+ struct event *signal_event;
+
+ struct sockaddr_in sin;
+#ifdef _WIN32
+ WSADATA wsa_data;
+ WSAStartup(0x0201, &wsa_data);
+#endif
+
+ base = event_base_new();
+ if (!base) {
+ fprintf(stderr, "Could not initialize libevent!\n");
+ return 1;
+ }
+
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(PORT);
+
+ listener = evconnlistener_new_bind(base, listener_cb, (void *)base,
+ LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_FREE, -1,
+ (struct sockaddr*)&sin,
+ sizeof(sin));
+
+ if (!listener) {
+ fprintf(stderr, "Could not create a listener!\n");
+ return 1;
+ }
+
+ signal_event = evsignal_new(base, SIGINT, signal_cb, (void *)base);
+
+ if (!signal_event || event_add(signal_event, NULL)<0) {
+ fprintf(stderr, "Could not create/add a signal event!\n");
+ return 1;
+ }
+
+ event_base_dispatch(base);
+
+ evconnlistener_free(listener);
+ event_free(signal_event);
+ event_base_free(base);
+
+ printf("done\n");
+ return 0;
+}
+
+static void
+listener_cb(struct evconnlistener *listener, evutil_socket_t fd,
+ struct sockaddr *sa, int socklen, void *user_data)
+{
+ struct event_base *base = user_data;
+ struct bufferevent *bev;
+
+ bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
+ if (!bev) {
+ fprintf(stderr, "Error constructing bufferevent!");
+ event_base_loopbreak(base);
+ return;
+ }
+ bufferevent_setcb(bev, NULL, conn_writecb, conn_eventcb, NULL);
+ bufferevent_enable(bev, EV_WRITE);
+ bufferevent_disable(bev, EV_READ);
+
+ bufferevent_write(bev, MESSAGE, strlen(MESSAGE));
+}
+
+static void
+conn_writecb(struct bufferevent *bev, void *user_data)
+{
+ struct evbuffer *output = bufferevent_get_output(bev);
+ if (evbuffer_get_length(output) == 0) {
+ printf("flushed answer\n");
+ bufferevent_free(bev);
+ }
+}
+
+static void
+conn_eventcb(struct bufferevent *bev, short events, void *user_data)
+{
+ if (events & BEV_EVENT_EOF) {
+ printf("Connection closed.\n");
+ } else if (events & BEV_EVENT_ERROR) {
+ printf("Got an error on the connection: %s\n",
+ strerror(errno));/*XXX win32*/
+ }
+ /* None of the other events can happen here, since we haven't enabled
+ * timeouts */
+ bufferevent_free(bev);
+}
+
+static void
+signal_cb(evutil_socket_t sig, short events, void *user_data)
+{
+ struct event_base *base = user_data;
+ struct timeval delay = { 2, 0 };
+
+ printf("Caught an interrupt signal; exiting cleanly in two seconds.\n");
+
+ event_base_loopexit(base, &delay);
+}
diff --git a/libs/libevent/docs/sample/hostcheck.c b/libs/libevent/docs/sample/hostcheck.c
new file mode 100644
index 0000000000..50709369c0
--- /dev/null
+++ b/libs/libevent/docs/sample/hostcheck.c
@@ -0,0 +1,217 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at http://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+/* This file is an amalgamation of hostcheck.c and most of rawstr.c
+ from cURL. The contents of the COPYING file mentioned above are:
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright (c) 1996 - 2013, Daniel Stenberg, <daniel@haxx.se>.
+
+All rights reserved.
+
+Permission to use, copy, modify, and distribute this software for any purpose
+with or without fee is hereby granted, provided that the above copyright
+notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN
+NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder shall not
+be used in advertising or otherwise to promote the sale, use or other dealings
+in this Software without prior written authorization of the copyright holder.
+*/
+
+#include "hostcheck.h"
+#include <string.h>
+
+/* Portable, consistent toupper (remember EBCDIC). Do not use toupper() because
+ its behavior is altered by the current locale. */
+static char Curl_raw_toupper(char in)
+{
+ switch (in) {
+ case 'a':
+ return 'A';
+ case 'b':
+ return 'B';
+ case 'c':
+ return 'C';
+ case 'd':
+ return 'D';
+ case 'e':
+ return 'E';
+ case 'f':
+ return 'F';
+ case 'g':
+ return 'G';
+ case 'h':
+ return 'H';
+ case 'i':
+ return 'I';
+ case 'j':
+ return 'J';
+ case 'k':
+ return 'K';
+ case 'l':
+ return 'L';
+ case 'm':
+ return 'M';
+ case 'n':
+ return 'N';
+ case 'o':
+ return 'O';
+ case 'p':
+ return 'P';
+ case 'q':
+ return 'Q';
+ case 'r':
+ return 'R';
+ case 's':
+ return 'S';
+ case 't':
+ return 'T';
+ case 'u':
+ return 'U';
+ case 'v':
+ return 'V';
+ case 'w':
+ return 'W';
+ case 'x':
+ return 'X';
+ case 'y':
+ return 'Y';
+ case 'z':
+ return 'Z';
+ }
+ return in;
+}
+
+/*
+ * Curl_raw_equal() does a "raw" case insensitive string comparison. This is meant
+ * to be locale independent and only compare strings we know are safe for
+ * this. See http://daniel.haxx.se/blog/2008/10/15/strcasecmp-in-turkish/ for
+ * some further explanation to why this function is necessary.
+ *
+ * The function is capable of comparing a-z case insensitively even for
+ * non-ascii.
+ */
+
+static int Curl_raw_equal(const char *first, const char *second)
+{
+ while(*first && *second) {
+ if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second))
+ /* get out of the loop as soon as they don't match */
+ break;
+ first++;
+ second++;
+ }
+ /* we do the comparison here (possibly again), just to make sure that if the
+ loop above is skipped because one of the strings reached zero, we must not
+ return this as a successful match */
+ return (Curl_raw_toupper(*first) == Curl_raw_toupper(*second));
+}
+
+static int Curl_raw_nequal(const char *first, const char *second, size_t max)
+{
+ while(*first && *second && max) {
+ if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second)) {
+ break;
+ }
+ max--;
+ first++;
+ second++;
+ }
+ if(0 == max)
+ return 1; /* they are equal this far */
+
+ return Curl_raw_toupper(*first) == Curl_raw_toupper(*second);
+}
+
+/*
+ * Match a hostname against a wildcard pattern.
+ * E.g.
+ * "foo.host.com" matches "*.host.com".
+ *
+ * We use the matching rule described in RFC6125, section 6.4.3.
+ * http://tools.ietf.org/html/rfc6125#section-6.4.3
+ */
+
+static int hostmatch(const char *hostname, const char *pattern)
+{
+ const char *pattern_label_end, *pattern_wildcard, *hostname_label_end;
+ int wildcard_enabled;
+ size_t prefixlen, suffixlen;
+ pattern_wildcard = strchr(pattern, '*');
+ if(pattern_wildcard == NULL)
+ return Curl_raw_equal(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+ /* We require at least 2 dots in pattern to avoid too wide wildcard
+ match. */
+ wildcard_enabled = 1;
+ pattern_label_end = strchr(pattern, '.');
+ if(pattern_label_end == NULL || strchr(pattern_label_end+1, '.') == NULL ||
+ pattern_wildcard > pattern_label_end ||
+ Curl_raw_nequal(pattern, "xn--", 4)) {
+ wildcard_enabled = 0;
+ }
+ if(!wildcard_enabled)
+ return Curl_raw_equal(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+ hostname_label_end = strchr(hostname, '.');
+ if(hostname_label_end == NULL ||
+ !Curl_raw_equal(pattern_label_end, hostname_label_end))
+ return CURL_HOST_NOMATCH;
+
+ /* The wildcard must match at least one character, so the left-most
+ label of the hostname is at least as large as the left-most label
+ of the pattern. */
+ if(hostname_label_end - hostname < pattern_label_end - pattern)
+ return CURL_HOST_NOMATCH;
+
+ prefixlen = pattern_wildcard - pattern;
+ suffixlen = pattern_label_end - (pattern_wildcard+1);
+ return Curl_raw_nequal(pattern, hostname, prefixlen) &&
+ Curl_raw_nequal(pattern_wildcard+1, hostname_label_end - suffixlen,
+ suffixlen) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+}
+
+int Curl_cert_hostcheck(const char *match_pattern, const char *hostname)
+{
+ if(!match_pattern || !*match_pattern ||
+ !hostname || !*hostname) /* sanity check */
+ return 0;
+
+ if(Curl_raw_equal(hostname, match_pattern)) /* trivial case */
+ return 1;
+
+ if(hostmatch(hostname,match_pattern) == CURL_HOST_MATCH)
+ return 1;
+ return 0;
+}
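+
+/*
+ * Illustrative results, following the rules implemented above:
+ *
+ *   Curl_cert_hostcheck("*.example.com", "www.example.com")         -> 1
+ *   Curl_cert_hostcheck("*.example.com", "example.com")             -> 0  (the wildcard covers one leftmost label only)
+ *   Curl_cert_hostcheck("*.com", "example.com")                     -> 0  (pattern needs at least two dots)
+ *   Curl_cert_hostcheck("xn--*.example.com", "xn--abc.example.com") -> 0  (no wildcard match when the pattern starts with "xn--")
+ */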
diff --git a/libs/libevent/docs/sample/hostcheck.h b/libs/libevent/docs/sample/hostcheck.h
new file mode 100644
index 0000000000..f40bc43435
--- /dev/null
+++ b/libs/libevent/docs/sample/hostcheck.h
@@ -0,0 +1,30 @@
+#ifndef HEADER_CURL_HOSTCHECK_H
+#define HEADER_CURL_HOSTCHECK_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at http://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#define CURL_HOST_NOMATCH 0
+#define CURL_HOST_MATCH 1
+int Curl_cert_hostcheck(const char *match_pattern, const char *hostname);
+
+#endif /* HEADER_CURL_HOSTCHECK_H */
+
diff --git a/libs/libevent/docs/sample/http-connect.c b/libs/libevent/docs/sample/http-connect.c
new file mode 100644
index 0000000000..d6c7b5ea0b
--- /dev/null
+++ b/libs/libevent/docs/sample/http-connect.c
@@ -0,0 +1,119 @@
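+/*
+ * This sample issues an HTTP CONNECT request through a proxy and, once the
+ * tunnel is up, GETs the target URL over the same connection.
+ *
+ * Invocation sketch (addresses and URL are placeholders); note that both
+ * arguments need an explicit port, as checked by the VERIFY() calls below:
+ *
+ *   http-connect http://127.0.0.1:8888 http://example.com:80/index.html
+ */
+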
+#include "event2/event-config.h"
+
+#include <event2/event.h>
+#include <event2/http.h>
+#include <event2/http_struct.h>
+#include <event2/buffer.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+
+#define VERIFY(cond) do { \
+ if (!(cond)) { \
+ fprintf(stderr, "[error] %s\n", #cond); \
+ } \
+} while (0)
+
+#define URL_MAX 4096
+
+struct connect_base
+{
+ struct evhttp_connection *evcon;
+ struct evhttp_uri *location;
+};
+
+static void get_cb(struct evhttp_request *req, void *arg)
+{
+ ev_ssize_t len;
+ struct evbuffer *evbuf;
+
+ VERIFY(req);
+
+ evbuf = evhttp_request_get_input_buffer(req);
+ len = evbuffer_get_length(evbuf);
+ fwrite(evbuffer_pullup(evbuf, len), len, 1, stdout);
+ evbuffer_drain(evbuf, len);
+}
+
+static void connect_cb(struct evhttp_request *proxy_req, void *arg)
+{
+ char buffer[URL_MAX];
+
+ struct connect_base *base = arg;
+ struct evhttp_connection *evcon = base->evcon;
+ struct evhttp_uri *location = base->location;
+
+ VERIFY(proxy_req);
+ if (evcon) {
+ struct evhttp_request *req = evhttp_request_new(get_cb, NULL);
+ evhttp_add_header(req->output_headers, "Connection", "close");
+ VERIFY(!evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ evhttp_uri_join(location, buffer, URL_MAX)));
+ }
+}
+
+int main(int argc, const char **argv)
+{
+ char buffer[URL_MAX];
+
+ struct evhttp_uri *host_port;
+ struct evhttp_uri *location;
+ struct evhttp_uri *proxy;
+
+ struct event_base *base;
+ struct evhttp_connection *evcon;
+ struct evhttp_request *req;
+
+ struct connect_base connect_base;
+
+ if (argc != 3) {
+ printf("Usage: %s proxy url\n", argv[0]);
+ return 1;
+ }
+
+ {
+ proxy = evhttp_uri_parse(argv[1]);
+ VERIFY(evhttp_uri_get_host(proxy));
+ VERIFY(evhttp_uri_get_port(proxy) > 0);
+ }
+ {
+ host_port = evhttp_uri_parse(argv[2]);
+ evhttp_uri_set_scheme(host_port, NULL);
+ evhttp_uri_set_userinfo(host_port, NULL);
+ evhttp_uri_set_path(host_port, NULL);
+ evhttp_uri_set_query(host_port, NULL);
+ evhttp_uri_set_fragment(host_port, NULL);
+ VERIFY(evhttp_uri_get_host(host_port));
+ VERIFY(evhttp_uri_get_port(host_port) > 0);
+ }
+ {
+ location = evhttp_uri_parse(argv[2]);
+ evhttp_uri_set_scheme(location, NULL);
+ evhttp_uri_set_userinfo(location, 0);
+ evhttp_uri_set_host(location, NULL);
+ evhttp_uri_set_port(location, -1);
+ }
+
+ VERIFY(base = event_base_new());
+ VERIFY(evcon = evhttp_connection_base_new(base, NULL,
+ evhttp_uri_get_host(proxy), evhttp_uri_get_port(proxy)));
+ connect_base = (struct connect_base){
+ .evcon = evcon,
+ .location = location,
+ };
+ VERIFY(req = evhttp_request_new(connect_cb, &connect_base));
+
+ evhttp_add_header(req->output_headers, "Connection", "keep-alive");
+ evhttp_add_header(req->output_headers, "Proxy-Connection", "keep-alive");
+ evutil_snprintf(buffer, URL_MAX, "%s:%d",
+ evhttp_uri_get_host(host_port), evhttp_uri_get_port(host_port));
+ evhttp_make_request(evcon, req, EVHTTP_REQ_CONNECT, buffer);
+
+ event_base_dispatch(base);
+ evhttp_connection_free(evcon);
+ event_base_free(base);
+ evhttp_uri_free(proxy);
+ evhttp_uri_free(host_port);
+ evhttp_uri_free(location);
+ return 0;
+}
diff --git a/libs/libevent/docs/sample/http-server.c b/libs/libevent/docs/sample/http-server.c
new file mode 100644
index 0000000000..cbb9c914dd
--- /dev/null
+++ b/libs/libevent/docs/sample/http-server.c
@@ -0,0 +1,418 @@
+/*
+ A trivial static http webserver using Libevent's evhttp.
+
+ This is not the best code in the world, and it does some fairly stupid stuff
+ that you would never want to do in a production webserver. Caveat hackor!
+
+ */
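+
+/*
+ * Usage sketch: point it at a document root, e.g.
+ *
+ *   http-server .
+ *
+ * The sample binds port 0, so the kernel picks a free port; the chosen
+ * address and port are printed at startup (the "Listening on ..." line in
+ * main()).
+ */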
+
+/* Compatibility for possible missing IPv6 declarations */
+#include "../util-internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#include <io.h>
+#include <fcntl.h>
+#ifndef S_ISDIR
+#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR)
+#endif
+#else
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <dirent.h>
+#endif
+
+#include <event2/event.h>
+#include <event2/http.h>
+#include <event2/buffer.h>
+#include <event2/util.h>
+#include <event2/keyvalq_struct.h>
+
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+# ifdef _XOPEN_SOURCE_EXTENDED
+# include <arpa/inet.h>
+# endif
+#endif
+
+#ifdef _WIN32
+#ifndef stat
+#define stat _stat
+#endif
+#ifndef fstat
+#define fstat _fstat
+#endif
+#ifndef open
+#define open _open
+#endif
+#ifndef close
+#define close _close
+#endif
+#ifndef O_RDONLY
+#define O_RDONLY _O_RDONLY
+#endif
+#endif
+
+char uri_root[512];
+
+static const struct table_entry {
+ const char *extension;
+ const char *content_type;
+} content_type_table[] = {
+ { "txt", "text/plain" },
+ { "c", "text/plain" },
+ { "h", "text/plain" },
+ { "html", "text/html" },
+ { "htm", "text/htm" },
+ { "css", "text/css" },
+ { "gif", "image/gif" },
+ { "jpg", "image/jpeg" },
+ { "jpeg", "image/jpeg" },
+ { "png", "image/png" },
+ { "pdf", "application/pdf" },
+ { "ps", "application/postsript" },
+ { NULL, NULL },
+};
+
+/* Try to guess a good content-type for 'path' */
+static const char *
+guess_content_type(const char *path)
+{
+ const char *last_period, *extension;
+ const struct table_entry *ent;
+ last_period = strrchr(path, '.');
+ if (!last_period || strchr(last_period, '/'))
+ goto not_found; /* no extension */
+ extension = last_period + 1;
+ for (ent = &content_type_table[0]; ent->extension; ++ent) {
+ if (!evutil_ascii_strcasecmp(ent->extension, extension))
+ return ent->content_type;
+ }
+
+not_found:
+ return "application/misc";
+}
+
+/* Callback used for the /dump URI, and for every non-GET request:
+ * dumps all information to stdout and gives back a trivial 200 ok */
+static void
+dump_request_cb(struct evhttp_request *req, void *arg)
+{
+ const char *cmdtype;
+ struct evkeyvalq *headers;
+ struct evkeyval *header;
+ struct evbuffer *buf;
+
+ switch (evhttp_request_get_command(req)) {
+ case EVHTTP_REQ_GET: cmdtype = "GET"; break;
+ case EVHTTP_REQ_POST: cmdtype = "POST"; break;
+ case EVHTTP_REQ_HEAD: cmdtype = "HEAD"; break;
+ case EVHTTP_REQ_PUT: cmdtype = "PUT"; break;
+ case EVHTTP_REQ_DELETE: cmdtype = "DELETE"; break;
+ case EVHTTP_REQ_OPTIONS: cmdtype = "OPTIONS"; break;
+ case EVHTTP_REQ_TRACE: cmdtype = "TRACE"; break;
+ case EVHTTP_REQ_CONNECT: cmdtype = "CONNECT"; break;
+ case EVHTTP_REQ_PATCH: cmdtype = "PATCH"; break;
+ default: cmdtype = "unknown"; break;
+ }
+
+ printf("Received a %s request for %s\nHeaders:\n",
+ cmdtype, evhttp_request_get_uri(req));
+
+ headers = evhttp_request_get_input_headers(req);
+ for (header = headers->tqh_first; header;
+ header = header->next.tqe_next) {
+ printf(" %s: %s\n", header->key, header->value);
+ }
+
+ buf = evhttp_request_get_input_buffer(req);
+ puts("Input data: <<<");
+ while (evbuffer_get_length(buf)) {
+ int n;
+ char cbuf[128];
+ n = evbuffer_remove(buf, cbuf, sizeof(cbuf));
+ if (n > 0)
+ (void) fwrite(cbuf, 1, n, stdout);
+ }
+ puts(">>>");
+
+ evhttp_send_reply(req, 200, "OK", NULL);
+}
+
+/* This callback gets invoked when we get any http request that doesn't match
+ * any other callback. Like any evhttp server callback, it has a simple job:
+ * it must eventually call evhttp_send_error() or evhttp_send_reply().
+ */
+static void
+send_document_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = NULL;
+ const char *docroot = arg;
+ const char *uri = evhttp_request_get_uri(req);
+ struct evhttp_uri *decoded = NULL;
+ const char *path;
+ char *decoded_path;
+ char *whole_path = NULL;
+ size_t len;
+ int fd = -1;
+ struct stat st;
+
+ if (evhttp_request_get_command(req) != EVHTTP_REQ_GET) {
+ dump_request_cb(req, arg);
+ return;
+ }
+
+ printf("Got a GET request for <%s>\n", uri);
+
+ /* Decode the URI */
+ decoded = evhttp_uri_parse(uri);
+ if (!decoded) {
+ printf("It's not a good URI. Sending BADREQUEST\n");
+ evhttp_send_error(req, HTTP_BADREQUEST, 0);
+ return;
+ }
+
+ /* Let's see what path the user asked for. */
+ path = evhttp_uri_get_path(decoded);
+ if (!path) path = "/";
+
+ /* We need to decode it, to see what path the user really wanted. */
+ decoded_path = evhttp_uridecode(path, 0, NULL);
+ if (decoded_path == NULL)
+ goto err;
+ /* Don't allow any ".."s in the path, to avoid exposing stuff outside
+ * of the docroot. This test is both overzealous and underzealous:
+ * it forbids acceptable paths like "/this/one..here", but it doesn't
+ * do anything to prevent symlink following. */
+ if (strstr(decoded_path, ".."))
+ goto err;
+
+ len = strlen(decoded_path)+strlen(docroot)+2;
+ if (!(whole_path = malloc(len))) {
+ perror("malloc");
+ goto err;
+ }
+ evutil_snprintf(whole_path, len, "%s/%s", docroot, decoded_path);
+
+ if (stat(whole_path, &st)<0) {
+ goto err;
+ }
+
+ /* This holds the content we're sending. */
+ evb = evbuffer_new();
+
+ if (S_ISDIR(st.st_mode)) {
+ /* If it's a directory, read the contents and make a little
+ * index page */
+#ifdef _WIN32
+ HANDLE d;
+ WIN32_FIND_DATAA ent;
+ char *pattern;
+ size_t dirlen;
+#else
+ DIR *d;
+ struct dirent *ent;
+#endif
+ const char *trailing_slash = "";
+
+ if (!strlen(path) || path[strlen(path)-1] != '/')
+ trailing_slash = "/";
+
+#ifdef _WIN32
+ dirlen = strlen(whole_path);
+ pattern = malloc(dirlen+3);
+ memcpy(pattern, whole_path, dirlen);
+ pattern[dirlen] = '\\';
+ pattern[dirlen+1] = '*';
+ pattern[dirlen+2] = '\0';
+ d = FindFirstFileA(pattern, &ent);
+ free(pattern);
+ if (d == INVALID_HANDLE_VALUE)
+ goto err;
+#else
+ if (!(d = opendir(whole_path)))
+ goto err;
+#endif
+
+ evbuffer_add_printf(evb,
+ "<!DOCTYPE html>\n"
+ "<html>\n <head>\n"
+ " <meta charset='utf-8'>\n"
+ " <title>%s</title>\n"
+ " <base href='%s%s'>\n"
+ " </head>\n"
+ " <body>\n"
+ " <h1>%s</h1>\n"
+ " <ul>\n",
+ decoded_path, /* XXX html-escape this. */
+ path, /* XXX html-escape this? */
+ trailing_slash,
+ decoded_path /* XXX html-escape this */);
+#ifdef _WIN32
+ do {
+ const char *name = ent.cFileName;
+#else
+ while ((ent = readdir(d))) {
+ const char *name = ent->d_name;
+#endif
+ evbuffer_add_printf(evb,
+ " <li><a href=\"%s\">%s</a>\n",
+ name, name);/* XXX escape this */
+#ifdef _WIN32
+ } while (FindNextFileA(d, &ent));
+#else
+ }
+#endif
+ evbuffer_add_printf(evb, "</ul></body></html>\n");
+#ifdef _WIN32
+ FindClose(d);
+#else
+ closedir(d);
+#endif
+ evhttp_add_header(evhttp_request_get_output_headers(req),
+ "Content-Type", "text/html");
+ } else {
+ /* Otherwise it's a file; add it to the buffer to get
+ * sent via sendfile */
+ const char *type = guess_content_type(decoded_path);
+ if ((fd = open(whole_path, O_RDONLY)) < 0) {
+ perror("open");
+ goto err;
+ }
+
+ if (fstat(fd, &st)<0) {
+ /* Make sure the length still matches, now that we
+ * opened the file :/ */
+ perror("fstat");
+ goto err;
+ }
+ evhttp_add_header(evhttp_request_get_output_headers(req),
+ "Content-Type", type);
+ evbuffer_add_file(evb, fd, 0, st.st_size);
+ }
+
+ evhttp_send_reply(req, 200, "OK", evb);
+ goto done;
+err:
+ evhttp_send_error(req, 404, "Document was not found");
+ if (fd>=0)
+ close(fd);
+done:
+ if (decoded)
+ evhttp_uri_free(decoded);
+ if (decoded_path)
+ free(decoded_path);
+ if (whole_path)
+ free(whole_path);
+ if (evb)
+ evbuffer_free(evb);
+}
+
+static void
+syntax(void)
+{
+ fprintf(stdout, "Syntax: http-server <docroot>\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ struct event_base *base;
+ struct evhttp *http;
+ struct evhttp_bound_socket *handle;
+
+ unsigned short port = 0;
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#else
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return (1);
+#endif
+ if (argc < 2) {
+ syntax();
+ return 1;
+ }
+
+ base = event_base_new();
+ if (!base) {
+ fprintf(stderr, "Couldn't create an event_base: exiting\n");
+ return 1;
+ }
+
+ /* Create a new evhttp object to handle requests. */
+ http = evhttp_new(base);
+ if (!http) {
+ fprintf(stderr, "couldn't create evhttp. Exiting.\n");
+ return 1;
+ }
+
+ /* The /dump URI will dump all requests to stdout and say 200 ok. */
+ evhttp_set_cb(http, "/dump", dump_request_cb, NULL);
+
+ /* We want to accept arbitrary requests, so we need to set a "generic"
+ * cb. We can also add callbacks for specific paths. */
+ evhttp_set_gencb(http, send_document_cb, argv[1]);
+
+ /* Now we tell the evhttp what port to listen on */
+ handle = evhttp_bind_socket_with_handle(http, "0.0.0.0", port);
+ if (!handle) {
+ fprintf(stderr, "couldn't bind to port %d. Exiting.\n",
+ (int)port);
+ return 1;
+ }
+
+ {
+ /* Extract and display the address we're listening on. */
+ struct sockaddr_storage ss;
+ evutil_socket_t fd;
+ ev_socklen_t socklen = sizeof(ss);
+ char addrbuf[128];
+ void *inaddr;
+ const char *addr;
+ int got_port = -1;
+ fd = evhttp_bound_socket_get_fd(handle);
+ memset(&ss, 0, sizeof(ss));
+ if (getsockname(fd, (struct sockaddr *)&ss, &socklen)) {
+ perror("getsockname() failed");
+ return 1;
+ }
+ if (ss.ss_family == AF_INET) {
+ got_port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
+ inaddr = &((struct sockaddr_in*)&ss)->sin_addr;
+ } else if (ss.ss_family == AF_INET6) {
+ got_port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
+ inaddr = &((struct sockaddr_in6*)&ss)->sin6_addr;
+ } else {
+ fprintf(stderr, "Weird address family %d\n",
+ ss.ss_family);
+ return 1;
+ }
+ addr = evutil_inet_ntop(ss.ss_family, inaddr, addrbuf,
+ sizeof(addrbuf));
+ if (addr) {
+ printf("Listening on %s:%d\n", addr, got_port);
+ evutil_snprintf(uri_root, sizeof(uri_root),
+ "http://%s:%d",addr,got_port);
+ } else {
+ fprintf(stderr, "evutil_inet_ntop failed\n");
+ return 1;
+ }
+ }
+
+ event_base_dispatch(base);
+
+ return 0;
+}
diff --git a/libs/libevent/docs/sample/https-client.c b/libs/libevent/docs/sample/https-client.c
new file mode 100644
index 0000000000..029cd19c75
--- /dev/null
+++ b/libs/libevent/docs/sample/https-client.c
@@ -0,0 +1,494 @@
+/*
+ This is an example of how to hook up evhttp with bufferevent_ssl
+
+ It just GETs an https URL given on the command-line and prints the response
+ body to stdout.
+
+ Actually, it also accepts plain http URLs to make it easy to compare http vs
+ https code paths.
+
+ Loosely based on le-proxy.c.
+ */
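+
+/*
+ * Usage sketch (URL and file name are placeholders): passing -data switches
+ * the request from GET to POST and sets Content-Length from the file size,
+ * e.g.:
+ *
+ *   https-client -url https://example.com/upload -data body.bin -retries 3
+ */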
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+
+#define snprintf _snprintf
+#define strcasecmp _stricmp
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#include <event2/bufferevent_ssl.h>
+#include <event2/bufferevent.h>
+#include <event2/buffer.h>
+#include <event2/listener.h>
+#include <event2/util.h>
+#include <event2/http.h>
+
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/rand.h>
+
+#include "openssl_hostname_validation.h"
+
+static struct event_base *base;
+static int ignore_cert = 0;
+
+static void
+http_request_done(struct evhttp_request *req, void *ctx)
+{
+ char buffer[256];
+ int nread;
+
+ if (req == NULL) {
+ /* If req is NULL, it means an error occurred, but
+ * sadly we are mostly left guessing what the error
+ * might have been. We'll do our best... */
+ struct bufferevent *bev = (struct bufferevent *) ctx;
+ unsigned long oslerr;
+ int printed_err = 0;
+ int errcode = EVUTIL_SOCKET_ERROR();
+ fprintf(stderr, "some request failed - no idea which one though!\n");
+ /* Print out the OpenSSL error queue that libevent
+ * squirreled away for us, if any. */
+ while ((oslerr = bufferevent_get_openssl_error(bev))) {
+ ERR_error_string_n(oslerr, buffer, sizeof(buffer));
+ fprintf(stderr, "%s\n", buffer);
+ printed_err = 1;
+ }
+ /* If the OpenSSL error queue was empty, maybe it was a
+ * socket error; let's try printing that. */
+ if (! printed_err)
+ fprintf(stderr, "socket error = %s (%d)\n",
+ evutil_socket_error_to_string(errcode),
+ errcode);
+ return;
+ }
+
+ fprintf(stderr, "Response line: %d %s\n",
+ evhttp_request_get_response_code(req),
+ evhttp_request_get_response_code_line(req));
+
+ while ((nread = evbuffer_remove(evhttp_request_get_input_buffer(req),
+ buffer, sizeof(buffer)))
+ > 0) {
+ /* These are just arbitrary chunks of 256 bytes.
+ * They are not lines, so we can't treat them as such. */
+ fwrite(buffer, nread, 1, stdout);
+ }
+}
+
+static void
+syntax(void)
+{
+ fputs("Syntax:\n", stderr);
+ fputs(" https-client -url <https-url> [-data data-file.bin] [-ignore-cert] [-retries num] [-timeout sec] [-crt crt]\n", stderr);
+ fputs("Example:\n", stderr);
+ fputs(" https-client -url https://ip.appspot.com/\n", stderr);
+}
+
+static void
+err(const char *msg)
+{
+ fputs(msg, stderr);
+}
+
+static void
+err_openssl(const char *func)
+{
+ fprintf (stderr, "%s failed:\n", func);
+
+ /* This is the OpenSSL function that prints the contents of the
+ * error stack to the specified file handle. */
+ ERR_print_errors_fp (stderr);
+
+ exit(1);
+}
+
+/* See http://archives.seul.org/libevent/users/Jan-2013/msg00039.html */
+static int cert_verify_callback(X509_STORE_CTX *x509_ctx, void *arg)
+{
+ char cert_str[256];
+ const char *host = (const char *) arg;
+ const char *res_str = "X509_verify_cert failed";
+ HostnameValidationResult res = Error;
+
+ /* This is the function that OpenSSL would call if we hadn't called
+ * SSL_CTX_set_cert_verify_callback(). Therefore, we are "wrapping"
+ * the default functionality, rather than replacing it. */
+ int ok_so_far = 0;
+
+ X509 *server_cert = NULL;
+
+ if (ignore_cert) {
+ return 1;
+ }
+
+ ok_so_far = X509_verify_cert(x509_ctx);
+
+ server_cert = X509_STORE_CTX_get_current_cert(x509_ctx);
+
+ if (ok_so_far) {
+ res = validate_hostname(host, server_cert);
+
+ switch (res) {
+ case MatchFound:
+ res_str = "MatchFound";
+ break;
+ case MatchNotFound:
+ res_str = "MatchNotFound";
+ break;
+ case NoSANPresent:
+ res_str = "NoSANPresent";
+ break;
+ case MalformedCertificate:
+ res_str = "MalformedCertificate";
+ break;
+ case Error:
+ res_str = "Error";
+ break;
+ default:
+ res_str = "WTF!";
+ break;
+ }
+ }
+
+ X509_NAME_oneline(X509_get_subject_name (server_cert),
+ cert_str, sizeof (cert_str));
+
+ if (res == MatchFound) {
+ printf("https server '%s' has this certificate, "
+ "which looks good to me:\n%s\n",
+ host, cert_str);
+ return 1;
+ } else {
+ printf("Got '%s' for hostname '%s' and certificate:\n%s\n",
+ res_str, host, cert_str);
+ return 0;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int r;
+
+ struct evhttp_uri *http_uri = NULL;
+ const char *url = NULL, *data_file = NULL;
+ const char *crt = "/etc/ssl/certs/ca-certificates.crt";
+ const char *scheme, *host, *path, *query;
+ char uri[256];
+ int port;
+ int retries = 0;
+ int timeout = -1;
+
+ SSL_CTX *ssl_ctx = NULL;
+ SSL *ssl = NULL;
+ struct bufferevent *bev;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req;
+ struct evkeyvalq *output_headers;
+ struct evbuffer *output_buffer;
+
+ int i;
+ int ret = 0;
+ enum { HTTP, HTTPS } type = HTTP;
+
+ for (i = 1; i < argc; i++) {
+ if (!strcmp("-url", argv[i])) {
+ if (i < argc - 1) {
+ url = argv[i + 1];
+ } else {
+ syntax();
+ goto error;
+ }
+ } else if (!strcmp("-crt", argv[i])) {
+ if (i < argc - 1) {
+ crt = argv[i + 1];
+ } else {
+ syntax();
+ goto error;
+ }
+ } else if (!strcmp("-ignore-cert", argv[i])) {
+ ignore_cert = 1;
+ } else if (!strcmp("-data", argv[i])) {
+ if (i < argc - 1) {
+ data_file = argv[i + 1];
+ } else {
+ syntax();
+ goto error;
+ }
+ } else if (!strcmp("-retries", argv[i])) {
+ if (i < argc - 1) {
+ retries = atoi(argv[i + 1]);
+ } else {
+ syntax();
+ goto error;
+ }
+ } else if (!strcmp("-timeout", argv[i])) {
+ if (i < argc - 1) {
+ timeout = atoi(argv[i + 1]);
+ } else {
+ syntax();
+ goto error;
+ }
+ } else if (!strcmp("-help", argv[i])) {
+ syntax();
+ goto error;
+ }
+ }
+
+ if (!url) {
+ syntax();
+ goto error;
+ }
+
+#ifdef _WIN32
+ {
+ WORD wVersionRequested;
+ WSADATA wsaData;
+ int err;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ err = WSAStartup(wVersionRequested, &wsaData);
+ if (err != 0) {
+ printf("WSAStartup failed with error: %d\n", err);
+ goto error;
+ }
+ }
+#endif // _WIN32
+
+ http_uri = evhttp_uri_parse(url);
+ if (http_uri == NULL) {
+ err("malformed url");
+ goto error;
+ }
+
+ scheme = evhttp_uri_get_scheme(http_uri);
+ if (scheme == NULL || (strcasecmp(scheme, "https") != 0 &&
+ strcasecmp(scheme, "http") != 0)) {
+ err("url must be http or https");
+ goto error;
+ }
+
+ host = evhttp_uri_get_host(http_uri);
+ if (host == NULL) {
+ err("url must have a host");
+ goto error;
+ }
+
+ port = evhttp_uri_get_port(http_uri);
+ if (port == -1) {
+ port = (strcasecmp(scheme, "http") == 0) ? 80 : 443;
+ }
+
+ path = evhttp_uri_get_path(http_uri);
+ if (strlen(path) == 0) {
+ path = "/";
+ }
+
+ query = evhttp_uri_get_query(http_uri);
+ if (query == NULL) {
+ snprintf(uri, sizeof(uri) - 1, "%s", path);
+ } else {
+ snprintf(uri, sizeof(uri) - 1, "%s?%s", path, query);
+ }
+ uri[sizeof(uri) - 1] = '\0';
+
+ // Initialize OpenSSL
+ SSL_library_init();
+ ERR_load_crypto_strings();
+ SSL_load_error_strings();
+ OpenSSL_add_all_algorithms();
+
+ /* This isn't strictly necessary... OpenSSL performs RAND_poll
+ * automatically on first use of random number generator. */
+ r = RAND_poll();
+ if (r == 0) {
+ err_openssl("RAND_poll");
+ goto error;
+ }
+
+ /* Create a new OpenSSL context */
+ ssl_ctx = SSL_CTX_new(SSLv23_method());
+ if (!ssl_ctx) {
+ err_openssl("SSL_CTX_new");
+ goto error;
+ }
+
+#ifndef _WIN32
+ /* TODO: Add certificate loading on Windows as well */
+
+ /* Attempt to use the system's trusted root certificates.
+ * (This path is only valid for Debian-based systems.) */
+ if (1 != SSL_CTX_load_verify_locations(ssl_ctx, crt, NULL)) {
+ err_openssl("SSL_CTX_load_verify_locations");
+ goto error;
+ }
+ /* Ask OpenSSL to verify the server certificate. Note that this
+ * does NOT include verifying that the hostname is correct.
+ * So, by itself, this means anyone with any legitimate
+ * CA-issued certificate for any website, can impersonate any
+ * other website in the world. This is not good. See "The
+ * Most Dangerous Code in the World" article at
+ * https://crypto.stanford.edu/~dabo/pubs/abstracts/ssl-client-bugs.html
+ */
+ SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, NULL);
+ /* This is how we solve the problem mentioned in the previous
+ * comment. We "wrap" OpenSSL's validation routine in our
+ * own routine, which also validates the hostname by calling
+ * the code provided by iSECPartners. Note that even though
+ * the "Everything You've Always Wanted to Know About
+ * Certificate Validation With OpenSSL (But Were Afraid to
+ * Ask)" paper from iSECPartners says very explicitly not to
+ * call SSL_CTX_set_cert_verify_callback (at the bottom of
+ * page 2), what we're doing here is safe because our
+ * cert_verify_callback() calls X509_verify_cert(), which is
+ * OpenSSL's built-in routine which would have been called if
+ * we hadn't set the callback. Therefore, we're just
+ * "wrapping" OpenSSL's routine, not replacing it. */
+ SSL_CTX_set_cert_verify_callback(ssl_ctx, cert_verify_callback,
+ (void *) host);
+#endif // not _WIN32
+
+ // Create event base
+ base = event_base_new();
+ if (!base) {
+ perror("event_base_new()");
+ goto error;
+ }
+
+ // Create OpenSSL bufferevent and stack evhttp on top of it
+ ssl = SSL_new(ssl_ctx);
+ if (ssl == NULL) {
+ err_openssl("SSL_new()");
+ goto error;
+ }
+
+ #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ // Set hostname for SNI extension
+ SSL_set_tlsext_host_name(ssl, host);
+ #endif
+
+ if (strcasecmp(scheme, "http") == 0) {
+ bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+ } else {
+ type = HTTPS;
+ bev = bufferevent_openssl_socket_new(base, -1, ssl,
+ BUFFEREVENT_SSL_CONNECTING,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+ }
+
+ if (bev == NULL) {
+ fprintf(stderr, "bufferevent_openssl_socket_new() failed\n");
+ goto error;
+ }
+
+ bufferevent_openssl_set_allow_dirty_shutdown(bev, 1);
+
+ // For simplicity, we let DNS resolution block. Everything else should be
+ // asynchronous though.
+ evcon = evhttp_connection_base_bufferevent_new(base, NULL, bev,
+ host, port);
+ if (evcon == NULL) {
+ fprintf(stderr, "evhttp_connection_base_bufferevent_new() failed\n");
+ goto error;
+ }
+
+ if (retries > 0) {
+ evhttp_connection_set_retries(evcon, retries);
+ }
+ if (timeout >= 0) {
+ evhttp_connection_set_timeout(evcon, timeout);
+ }
+
+ // Fire off the request
+ req = evhttp_request_new(http_request_done, bev);
+ if (req == NULL) {
+ fprintf(stderr, "evhttp_request_new() failed\n");
+ goto error;
+ }
+
+ output_headers = evhttp_request_get_output_headers(req);
+ evhttp_add_header(output_headers, "Host", host);
+ evhttp_add_header(output_headers, "Connection", "close");
+
+ if (data_file) {
+ /* NOTE: In production code, you'd probably want to use
+ * evbuffer_add_file() or evbuffer_add_file_segment(), to
+ * avoid needless copying. */
+ FILE * f = fopen(data_file, "rb");
+ char buf[1024];
+ size_t s;
+ size_t bytes = 0;
+
+ if (!f) {
+ syntax();
+ goto error;
+ }
+
+ output_buffer = evhttp_request_get_output_buffer(req);
+ while ((s = fread(buf, 1, sizeof(buf), f)) > 0) {
+ evbuffer_add(output_buffer, buf, s);
+ bytes += s;
+ }
+ evutil_snprintf(buf, sizeof(buf)-1, "%lu", (unsigned long)bytes);
+ evhttp_add_header(output_headers, "Content-Length", buf);
+ fclose(f);
+ }
+
+ r = evhttp_make_request(evcon, req, data_file ? EVHTTP_REQ_POST : EVHTTP_REQ_GET, uri);
+ if (r != 0) {
+ fprintf(stderr, "evhttp_make_request() failed\n");
+ goto error;
+ }
+
+ event_base_dispatch(base);
+ goto cleanup;
+
+error:
+ ret = 1;
+cleanup:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http_uri)
+ evhttp_uri_free(http_uri);
+ event_base_free(base);
+
+ if (ssl_ctx)
+ SSL_CTX_free(ssl_ctx);
+ if (type == HTTP && ssl)
+ SSL_free(ssl);
+ EVP_cleanup();
+ ERR_free_strings();
+
+#ifdef EVENT__HAVE_ERR_REMOVE_THREAD_STATE
+ ERR_remove_thread_state(NULL);
+#else
+ ERR_remove_state(0);
+#endif
+ CRYPTO_cleanup_all_ex_data();
+
+ sk_SSL_COMP_free(SSL_COMP_get_compression_methods());
+
+#ifdef _WIN32
+ WSACleanup();
+#endif
+
+ return ret;
+}
diff --git a/libs/libevent/docs/sample/include.am b/libs/libevent/docs/sample/include.am
new file mode 100644
index 0000000000..d1a7242f7c
--- /dev/null
+++ b/libs/libevent/docs/sample/include.am
@@ -0,0 +1,53 @@
+# sample/include.am for libevent
+# Copyright 2000-2007 Niels Provos
+# Copyright 2007-2012 Niels Provos and Nick Mathewson
+#
+# See LICENSE for copying information.
+
+SAMPLES = \
+ sample/dns-example \
+ sample/event-read-fifo \
+ sample/hello-world \
+ sample/http-server \
+ sample/http-connect \
+ sample/signal-test \
+ sample/time-test
+
+if OPENSSL
+SAMPLES += sample/le-proxy
+sample_le_proxy_SOURCES = sample/le-proxy.c
+sample_le_proxy_LDADD = libevent.la libevent_openssl.la ${OPENSSL_LIBS} ${OPENSSL_LIBADD}
+sample_le_proxy_INCLUDES = $(OPENSSL_INCS)
+
+SAMPLES += sample/https-client
+sample_https_client_SOURCES = \
+ sample/https-client.c \
+ sample/hostcheck.c \
+ sample/openssl_hostname_validation.c
+sample_https_client_LDADD = libevent.la libevent_openssl.la ${OPENSSL_LIBS} ${OPENSSL_LIBADD}
+sample_https_client_INCLUDES = $(OPENSSL_INCS)
+noinst_HEADERS += \
+ sample/hostcheck.h \
+ sample/openssl_hostname_validation.h
+endif
+
+if BUILD_SAMPLES
+noinst_PROGRAMS += $(SAMPLES)
+endif
+
+$(SAMPLES) : libevent.la
+
+sample_event_read_fifo_SOURCES = sample/event-read-fifo.c
+sample_event_read_fifo_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_time_test_SOURCES = sample/time-test.c
+sample_time_test_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_signal_test_SOURCES = sample/signal-test.c
+sample_signal_test_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_dns_example_SOURCES = sample/dns-example.c
+sample_dns_example_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_hello_world_SOURCES = sample/hello-world.c
+sample_hello_world_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_http_server_SOURCES = sample/http-server.c
+sample_http_server_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+sample_http_connect_SOURCES = sample/http-connect.c
+sample_http_connect_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
diff --git a/libs/libevent/docs/sample/le-proxy.c b/libs/libevent/docs/sample/le-proxy.c
new file mode 100644
index 0000000000..30e0a5f6b9
--- /dev/null
+++ b/libs/libevent/docs/sample/le-proxy.c
@@ -0,0 +1,288 @@
+/*
+ This example code shows how to write an (optionally encrypting) SSL proxy
+ with Libevent's bufferevent layer.
+
+ XXX It's a little ugly and should probably be cleaned up.
+ */
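+
+/*
+ * Usage sketch (addresses are placeholders; both must be numeric, since they
+ * are parsed with evutil_parse_sockaddr_port()):
+ *
+ *   le-proxy -s 127.0.0.1:8888 1.2.3.4:443
+ *
+ * With -s the outgoing ("connect-to") side speaks SSL; by default that is
+ * done with an SSL filter bufferevent layered over a plain socket, while -W
+ * uses bufferevent_openssl_socket_new() directly instead.
+ */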
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#include <event2/bufferevent_ssl.h>
+#include <event2/bufferevent.h>
+#include <event2/buffer.h>
+#include <event2/listener.h>
+#include <event2/util.h>
+
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/rand.h>
+
+static struct event_base *base;
+static struct sockaddr_storage listen_on_addr;
+static struct sockaddr_storage connect_to_addr;
+static int connect_to_addrlen;
+static int use_wrapper = 1;
+
+static SSL_CTX *ssl_ctx = NULL;
+
+#define MAX_OUTPUT (512*1024)
+
+static void drained_writecb(struct bufferevent *bev, void *ctx);
+static void eventcb(struct bufferevent *bev, short what, void *ctx);
+
+static void
+readcb(struct bufferevent *bev, void *ctx)
+{
+ struct bufferevent *partner = ctx;
+ struct evbuffer *src, *dst;
+ size_t len;
+ src = bufferevent_get_input(bev);
+ len = evbuffer_get_length(src);
+ if (!partner) {
+ evbuffer_drain(src, len);
+ return;
+ }
+ dst = bufferevent_get_output(partner);
+ evbuffer_add_buffer(dst, src);
+
+ if (evbuffer_get_length(dst) >= MAX_OUTPUT) {
+ /* We're giving the other side data faster than it can
+ * pass it on. Stop reading here until we have drained the
+ * other side to MAX_OUTPUT/2 bytes. */
+ bufferevent_setcb(partner, readcb, drained_writecb,
+ eventcb, bev);
+ bufferevent_setwatermark(partner, EV_WRITE, MAX_OUTPUT/2,
+ MAX_OUTPUT);
+ bufferevent_disable(bev, EV_READ);
+ }
+}
+
+static void
+drained_writecb(struct bufferevent *bev, void *ctx)
+{
+ struct bufferevent *partner = ctx;
+
+ /* We were choking the other side until we drained our outbuf a bit.
+ * Now it seems drained. */
+ bufferevent_setcb(bev, readcb, NULL, eventcb, partner);
+ bufferevent_setwatermark(bev, EV_WRITE, 0, 0);
+ if (partner)
+ bufferevent_enable(partner, EV_READ);
+}
+
+static void
+close_on_finished_writecb(struct bufferevent *bev, void *ctx)
+{
+ struct evbuffer *b = bufferevent_get_output(bev);
+
+ if (evbuffer_get_length(b) == 0) {
+ bufferevent_free(bev);
+ }
+}
+
+static void
+eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ struct bufferevent *partner = ctx;
+
+ if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
+ if (what & BEV_EVENT_ERROR) {
+ unsigned long err;
+ while ((err = (bufferevent_get_openssl_error(bev)))) {
+ const char *msg = (const char*)
+ ERR_reason_error_string(err);
+ const char *lib = (const char*)
+ ERR_lib_error_string(err);
+ const char *func = (const char*)
+ ERR_func_error_string(err);
+ fprintf(stderr,
+ "%s in %s %s\n", msg, lib, func);
+ }
+ if (errno)
+ perror("connection error");
+ }
+
+ if (partner) {
+ /* Flush all pending data */
+ readcb(bev, ctx);
+
+ if (evbuffer_get_length(
+ bufferevent_get_output(partner))) {
+ /* We still have to flush data from the other
+ * side, but when that's done, close the other
+ * side. */
+ bufferevent_setcb(partner,
+ NULL, close_on_finished_writecb,
+ eventcb, NULL);
+ bufferevent_disable(partner, EV_READ);
+ } else {
+ /* We have nothing left to say to the other
+ * side; close it. */
+ bufferevent_free(partner);
+ }
+ }
+ bufferevent_free(bev);
+ }
+}
+
+static void
+syntax(void)
+{
+ fputs("Syntax:\n", stderr);
+ fputs(" le-proxy [-s] [-W] <listen-on-addr> <connect-to-addr>\n", stderr);
+ fputs("Example:\n", stderr);
+ fputs(" le-proxy 127.0.0.1:8888 1.2.3.4:80\n", stderr);
+
+ exit(1);
+}
+
+static void
+accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
+ struct sockaddr *a, int slen, void *p)
+{
+ struct bufferevent *b_out, *b_in;
+ /* Create two linked bufferevent objects: one to connect, one for the
+ * new connection */
+ b_in = bufferevent_socket_new(base, fd,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+
+ if (!ssl_ctx || use_wrapper)
+ b_out = bufferevent_socket_new(base, -1,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+ else {
+ SSL *ssl = SSL_new(ssl_ctx);
+ b_out = bufferevent_openssl_socket_new(base, -1, ssl,
+ BUFFEREVENT_SSL_CONNECTING,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+ }
+
+ assert(b_in && b_out);
+
+ if (bufferevent_socket_connect(b_out,
+ (struct sockaddr*)&connect_to_addr, connect_to_addrlen)<0) {
+ perror("bufferevent_socket_connect");
+ bufferevent_free(b_out);
+ bufferevent_free(b_in);
+ return;
+ }
+
+ if (ssl_ctx && use_wrapper) {
+ struct bufferevent *b_ssl;
+ SSL *ssl = SSL_new(ssl_ctx);
+ b_ssl = bufferevent_openssl_filter_new(base,
+ b_out, ssl, BUFFEREVENT_SSL_CONNECTING,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+ if (!b_ssl) {
+ perror("bufferevent_openssl_filter_new");
+ bufferevent_free(b_out);
+ bufferevent_free(b_in);
+ return;
+ }
+ b_out = b_ssl;
+ }
+
+ bufferevent_setcb(b_in, readcb, NULL, eventcb, b_out);
+ bufferevent_setcb(b_out, readcb, NULL, eventcb, b_in);
+
+ bufferevent_enable(b_in, EV_READ|EV_WRITE);
+ bufferevent_enable(b_out, EV_READ|EV_WRITE);
+}
+
+int
+main(int argc, char **argv)
+{
+ int i;
+ int socklen;
+
+ int use_ssl = 0;
+ struct evconnlistener *listener;
+
+ if (argc < 3)
+ syntax();
+
+ for (i=1; i < argc; ++i) {
+ if (!strcmp(argv[i], "-s")) {
+ use_ssl = 1;
+ } else if (!strcmp(argv[i], "-W")) {
+ use_wrapper = 0;
+ } else if (argv[i][0] == '-') {
+ syntax();
+ } else
+ break;
+ }
+
+ if (i+2 != argc)
+ syntax();
+
+ memset(&listen_on_addr, 0, sizeof(listen_on_addr));
+ socklen = sizeof(listen_on_addr);
+ if (evutil_parse_sockaddr_port(argv[i],
+ (struct sockaddr*)&listen_on_addr, &socklen)<0) {
+ int p = atoi(argv[i]);
+ struct sockaddr_in *sin = (struct sockaddr_in*)&listen_on_addr;
+ if (p < 1 || p > 65535)
+ syntax();
+ sin->sin_port = htons(p);
+ sin->sin_addr.s_addr = htonl(0x7f000001);
+ sin->sin_family = AF_INET;
+ socklen = sizeof(struct sockaddr_in);
+ }
+
+ memset(&connect_to_addr, 0, sizeof(connect_to_addr));
+ connect_to_addrlen = sizeof(connect_to_addr);
+ if (evutil_parse_sockaddr_port(argv[i+1],
+ (struct sockaddr*)&connect_to_addr, &connect_to_addrlen)<0)
+ syntax();
+
+ base = event_base_new();
+ if (!base) {
+ perror("event_base_new()");
+ return 1;
+ }
+
+ if (use_ssl) {
+ int r;
+ SSL_library_init();
+ ERR_load_crypto_strings();
+ SSL_load_error_strings();
+ OpenSSL_add_all_algorithms();
+ r = RAND_poll();
+ if (r == 0) {
+ fprintf(stderr, "RAND_poll() failed.\n");
+ return 1;
+ }
+ ssl_ctx = SSL_CTX_new(SSLv23_method());
+ }
+
+ listener = evconnlistener_new_bind(base, accept_cb, NULL,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_CLOSE_ON_EXEC|LEV_OPT_REUSEABLE,
+ -1, (struct sockaddr*)&listen_on_addr, socklen);
+
+ if (! listener) {
+ fprintf(stderr, "Couldn't open listener.\n");
+ event_base_free(base);
+ return 1;
+ }
+ event_base_dispatch(base);
+
+ evconnlistener_free(listener);
+ event_base_free(base);
+
+ return 0;
+}
diff --git a/libs/libevent/docs/sample/openssl_hostname_validation.c b/libs/libevent/docs/sample/openssl_hostname_validation.c
new file mode 100644
index 0000000000..00e63d1e15
--- /dev/null
+++ b/libs/libevent/docs/sample/openssl_hostname_validation.c
@@ -0,0 +1,173 @@
+/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */
+
+/*
+Copyright (C) 2012, iSEC Partners.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ */
+
+/*
+ * Helper functions to perform basic hostname validation using OpenSSL.
+ *
+ * Please read "everything-you-wanted-to-know-about-openssl.pdf" before
+ * attempting to use this code. This whitepaper describes how the code works,
+ * how it should be used, and what its limitations are.
+ *
+ * Author: Alban Diquet
+ * License: See LICENSE
+ *
+ */
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <openssl/x509v3.h>
+#include <openssl/ssl.h>
+#include <string.h>
+
+#include "openssl_hostname_validation.h"
+#include "hostcheck.h"
+
+#define HOSTNAME_MAX_SIZE 255
+
+/**
+* Tries to find a match for hostname in the certificate's Common Name field.
+*
+* Returns MatchFound if a match was found.
+* Returns MatchNotFound if no matches were found.
+* Returns MalformedCertificate if the Common Name had a NUL character embedded in it.
+* Returns Error if the Common Name could not be extracted.
+*/
+static HostnameValidationResult matches_common_name(const char *hostname, const X509 *server_cert) {
+ int common_name_loc = -1;
+ X509_NAME_ENTRY *common_name_entry = NULL;
+ ASN1_STRING *common_name_asn1 = NULL;
+ char *common_name_str = NULL;
+
+ // Find the position of the CN field in the Subject field of the certificate
+ common_name_loc = X509_NAME_get_index_by_NID(X509_get_subject_name((X509 *) server_cert), NID_commonName, -1);
+ if (common_name_loc < 0) {
+ return Error;
+ }
+
+ // Extract the CN field
+ common_name_entry = X509_NAME_get_entry(X509_get_subject_name((X509 *) server_cert), common_name_loc);
+ if (common_name_entry == NULL) {
+ return Error;
+ }
+
+ // Convert the CN field to a C string
+ common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry);
+ if (common_name_asn1 == NULL) {
+ return Error;
+ }
+ common_name_str = (char *) ASN1_STRING_data(common_name_asn1);
+
+ // Make sure there isn't an embedded NUL character in the CN
+ if ((size_t)ASN1_STRING_length(common_name_asn1) != strlen(common_name_str)) {
+ return MalformedCertificate;
+ }
+
+ // Compare expected hostname with the CN
+ if (Curl_cert_hostcheck(common_name_str, hostname) == CURL_HOST_MATCH) {
+ return MatchFound;
+ }
+ else {
+ return MatchNotFound;
+ }
+}
+
+
+/**
+* Tries to find a match for hostname in the certificate's Subject Alternative Name extension.
+*
+* Returns MatchFound if a match was found.
+* Returns MatchNotFound if no matches were found.
+* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it.
+* Returns NoSANPresent if the SAN extension was not present in the certificate.
+*/
+static HostnameValidationResult matches_subject_alternative_name(const char *hostname, const X509 *server_cert) {
+ HostnameValidationResult result = MatchNotFound;
+ int i;
+ int san_names_nb = -1;
+ STACK_OF(GENERAL_NAME) *san_names = NULL;
+
+ // Try to extract the names within the SAN extension from the certificate
+ san_names = X509_get_ext_d2i((X509 *) server_cert, NID_subject_alt_name, NULL, NULL);
+ if (san_names == NULL) {
+ return NoSANPresent;
+ }
+ san_names_nb = sk_GENERAL_NAME_num(san_names);
+
+ // Check each name within the extension
+ for (i=0; i<san_names_nb; i++) {
+ const GENERAL_NAME *current_name = sk_GENERAL_NAME_value(san_names, i);
+
+ if (current_name->type == GEN_DNS) {
+ // Current name is a DNS name, let's check it
+ char *dns_name = (char *) ASN1_STRING_data(current_name->d.dNSName);
+
+ // Make sure there isn't an embedded NUL character in the DNS name
+ if ((size_t)ASN1_STRING_length(current_name->d.dNSName) != strlen(dns_name)) {
+ result = MalformedCertificate;
+ break;
+ }
+ else { // Compare expected hostname with the DNS name
+ if (Curl_cert_hostcheck(dns_name, hostname)
+ == CURL_HOST_MATCH) {
+ result = MatchFound;
+ break;
+ }
+ }
+ }
+ }
+ sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free);
+
+ return result;
+}
+
+
+/**
+* Validates the server's identity by looking for the expected hostname in the
+* server's certificate. As described in RFC 6125, it first tries to find a match
+* in the Subject Alternative Name extension. If the extension is not present in
+* the certificate, it checks the Common Name instead.
+*
+* Returns MatchFound if a match was found.
+* Returns MatchNotFound if no matches were found.
+* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it.
+* Returns Error if there was an error.
+*/
+HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert) {
+ HostnameValidationResult result;
+
+ if((hostname == NULL) || (server_cert == NULL))
+ return Error;
+
+ // First try the Subject Alternative Names extension
+ result = matches_subject_alternative_name(hostname, server_cert);
+ if (result == NoSANPresent) {
+ // Extension was not found: try the Common Name
+ result = matches_common_name(hostname, server_cert);
+ }
+
+ return result;
+}
diff --git a/libs/libevent/docs/sample/openssl_hostname_validation.h b/libs/libevent/docs/sample/openssl_hostname_validation.h
new file mode 100644
index 0000000000..54aa1c436d
--- /dev/null
+++ b/libs/libevent/docs/sample/openssl_hostname_validation.h
@@ -0,0 +1,56 @@
+/* Obtained from: https://github.com/iSECPartners/ssl-conservatory */
+
+/*
+Copyright (C) 2012, iSEC Partners.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ */
+
+/*
+ * Helper functions to perform basic hostname validation using OpenSSL.
+ *
+ * Please read "everything-you-wanted-to-know-about-openssl.pdf" before
+ * attempting to use this code. This whitepaper describes how the code works,
+ * how it should be used, and what its limitations are.
+ *
+ * Author: Alban Diquet
+ * License: See LICENSE
+ *
+ */
+
+typedef enum {
+ MatchFound,
+ MatchNotFound,
+ NoSANPresent,
+ MalformedCertificate,
+ Error
+} HostnameValidationResult;
+
+/**
+* Validates the server's identity by looking for the expected hostname in the
+* server's certificate. As described in RFC 6125, it first tries to find a match
+* in the Subject Alternative Name extension. If the extension is not present in
+* the certificate, it checks the Common Name instead.
+*
+* Returns MatchFound if a match was found.
+* Returns MatchNotFound if no matches were found.
+* Returns MalformedCertificate if any of the hostnames had a NUL character embedded in it.
+* Returns Error if there was an error.
+*/
+HostnameValidationResult validate_hostname(const char *hostname, const X509 *server_cert);
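
For context, a minimal caller-side sketch of how the API above is typically used once a TLS handshake has completed. The ssl handle and the host string are assumptions for illustration, and check_peer_hostname is a hypothetical helper; SSL_get_peer_certificate() and X509_free() are standard OpenSSL calls.

#include <openssl/ssl.h>
#include "openssl_hostname_validation.h"

/* Returns 0 if the peer certificate presented on `ssl` matches `host`,
 * -1 otherwise.  `ssl` is assumed to be a connected handle whose
 * handshake has already completed. */
static int check_peer_hostname(SSL *ssl, const char *host)
{
	X509 *cert = SSL_get_peer_certificate(ssl);	/* may be NULL */
	HostnameValidationResult res;

	if (cert == NULL)
		return -1;
	res = validate_hostname(host, cert);
	X509_free(cert);
	return (res == MatchFound) ? 0 : -1;
}
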
diff --git a/libs/libevent/docs/sample/signal-test.c b/libs/libevent/docs/sample/signal-test.c
new file mode 100644
index 0000000000..a61642f325
--- /dev/null
+++ b/libs/libevent/docs/sample/signal-test.c
@@ -0,0 +1,75 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o signal-test \
+ * signal-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#include <event2/event-config.h>
+
+#include <sys/stat.h>
+#ifndef _WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#else
+#include <winsock2.h>
+#include <windows.h>
+#endif
+#include <signal.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event2/event.h>
+
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+int called = 0;
+
+static void
+signal_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *signal = arg;
+
+ printf("%s: got signal %d\n", __func__, event_get_signal(signal));
+
+ if (called >= 2)
+ event_del(signal);
+
+ called++;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct event *signal_int;
+ struct event_base* base;
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ /* Initialize the event library */
+ base = event_base_new();
+
+ /* Initialize one event */
+ signal_int = evsignal_new(base, SIGINT, signal_cb, event_self_cbarg());
+
+ event_add(signal_int, NULL);
+
+ event_base_dispatch(base);
+ event_base_free(base);
+
+ return (0);
+}
+
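
As a variation on signal-test.c above, a minimal sketch (not one of the shipped samples) that exits the loop on the first SIGINT rather than after three signals; it passes the event_base as the callback argument and uses event_base_loopbreak(). The Windows WSAStartup() setup shown in the sample is omitted here.

#include <signal.h>
#include <stdio.h>
#include <event2/event.h>

static void
sigint_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	printf("caught SIGINT, leaving the event loop\n");
	event_base_loopbreak(base);
}

int
main(void)
{
	struct event_base *base = event_base_new();
	struct event *sig = evsignal_new(base, SIGINT, sigint_cb, base);

	event_add(sig, NULL);
	event_base_dispatch(base);

	event_free(sig);
	event_base_free(base);
	return 0;
}
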
diff --git a/libs/libevent/docs/sample/time-test.c b/libs/libevent/docs/sample/time-test.c
new file mode 100644
index 0000000000..c94c18a500
--- /dev/null
+++ b/libs/libevent/docs/sample/time-test.c
@@ -0,0 +1,107 @@
+/*
+ * XXX This sample code was once meant to show how to use the basic Libevent
+ * interfaces, but it never worked on non-Unix platforms, and some of the
+ * interfaces have changed since it was first written. It should probably
+ * be removed or replaced with something better.
+ *
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#include <event2/event-config.h>
+
+#include <sys/stat.h>
+#ifndef _WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#endif
+#include <time.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event2/event.h>
+#include <event2/event_struct.h>
+#include <event2/util.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+struct timeval lasttime;
+
+int event_is_persistent;
+
+static void
+timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct timeval newtime, difference;
+ struct event *timeout = arg;
+ double elapsed;
+
+ evutil_gettimeofday(&newtime, NULL);
+ evutil_timersub(&newtime, &lasttime, &difference);
+ elapsed = difference.tv_sec +
+ (difference.tv_usec / 1.0e6);
+
+ printf("timeout_cb called at %d: %.3f seconds elapsed.\n",
+ (int)newtime.tv_sec, elapsed);
+ lasttime = newtime;
+
+ if (! event_is_persistent) {
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = 2;
+ event_add(timeout, &tv);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct event timeout;
+ struct timeval tv;
+ struct event_base *base;
+ int flags;
+
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void)WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ if (argc == 2 && !strcmp(argv[1], "-p")) {
+ event_is_persistent = 1;
+ flags = EV_PERSIST;
+ } else {
+ event_is_persistent = 0;
+ flags = 0;
+ }
+
+ /* Initialize the event library */
+ base = event_base_new();
+
+ /* Initialize one event */
+ event_assign(&timeout, base, -1, flags, timeout_cb, (void*) &timeout);
+
+ evutil_timerclear(&tv);
+ tv.tv_sec = 2;
+ event_add(&timeout, &tv);
+
+ evutil_gettimeofday(&lasttime, NULL);
+
+ event_base_dispatch(base);
+
+ return (0);
+}
+
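
For comparison with the manual re-add in timeout_cb() above, a minimal sketch (again, not one of the shipped samples) of the same 2-second periodic timer written with event_new() and EV_PERSIST, so the timeout re-arms itself; the names used here are illustrative.

#include <stdio.h>
#include <event2/event.h>

static void
tick_cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
	printf("tick\n");		/* fires every 2 seconds until the loop exits */
}

int
main(void)
{
	struct event_base *base = event_base_new();
	struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);
	struct timeval two_sec = { 2, 0 };

	event_add(tick, &two_sec);	/* EV_PERSIST re-arms the timeout automatically */
	event_base_dispatch(base);

	event_free(tick);
	event_base_free(base);
	return 0;
}
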
diff --git a/libs/libevent/docs/select.c b/libs/libevent/docs/select.c
new file mode 100644
index 0000000000..8ae53cc11e
--- /dev/null
+++ b/libs/libevent/docs/select.c
@@ -0,0 +1,346 @@
+/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SELECT
+
+#ifdef __APPLE__
+/* Apple wants us to define this if we might ever pass more than
+ * FD_SETSIZE bits to select(). */
+#define _DARWIN_UNLIMITED_SELECT
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+#include <sys/queue.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "event-internal.h"
+#include "evsignal-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+
+#ifndef EVENT__HAVE_FD_MASK
+/* This type is mandatory, but Android doesn't define it. */
+typedef unsigned long fd_mask;
+#endif
+
+#ifndef NFDBITS
+#define NFDBITS (sizeof(fd_mask)*8)
+#endif
+
+/* Divide positive x by y, rounding up. */
+#define DIV_ROUNDUP(x, y) (((x)+((y)-1))/(y))
+
+/* How many bytes to allocate for N fds? */
+#define SELECT_ALLOC_SIZE(n) \
+ (DIV_ROUNDUP(n, NFDBITS) * sizeof(fd_mask))
+
+struct selectop {
+ int event_fds; /* Highest fd in fd set */
+ int event_fdsz;
+ int resize_out_sets;
+ fd_set *event_readset_in;
+ fd_set *event_writeset_in;
+ fd_set *event_readset_out;
+ fd_set *event_writeset_out;
+};
+
+static void *select_init(struct event_base *);
+static int select_add(struct event_base *, int, short old, short events, void*);
+static int select_del(struct event_base *, int, short old, short events, void*);
+static int select_dispatch(struct event_base *, struct timeval *);
+static void select_dealloc(struct event_base *);
+
+const struct eventop selectops = {
+ "select",
+ select_init,
+ select_add,
+ select_del,
+ select_dispatch,
+ select_dealloc,
+ 0, /* doesn't need reinit. */
+ EV_FEATURE_FDS,
+ 0,
+};
+
+static int select_resize(struct selectop *sop, int fdsz);
+static void select_free_selectop(struct selectop *sop);
+
+static void *
+select_init(struct event_base *base)
+{
+ struct selectop *sop;
+
+ if (!(sop = mm_calloc(1, sizeof(struct selectop))))
+ return (NULL);
+
+ if (select_resize(sop, SELECT_ALLOC_SIZE(32 + 1))) {
+ select_free_selectop(sop);
+ return (NULL);
+ }
+
+ evsig_init_(base);
+
+ evutil_weakrand_seed_(&base->weakrand_seed, 0);
+
+ return (sop);
+}
+
+#ifdef CHECK_INVARIANTS
+static void
+check_selectop(struct selectop *sop)
+{
+ /* nothing to be done here */
+}
+#else
+#define check_selectop(sop) do { (void) sop; } while (0)
+#endif
+
+static int
+select_dispatch(struct event_base *base, struct timeval *tv)
+{
+ int res=0, i, j, nfds;
+ struct selectop *sop = base->evbase;
+
+ check_selectop(sop);
+ if (sop->resize_out_sets) {
+ fd_set *readset_out=NULL, *writeset_out=NULL;
+ size_t sz = sop->event_fdsz;
+ if (!(readset_out = mm_realloc(sop->event_readset_out, sz)))
+ return (-1);
+ sop->event_readset_out = readset_out;
+ if (!(writeset_out = mm_realloc(sop->event_writeset_out, sz))) {
+ /* We don't free readset_out here, since it was
+ * already successfully reallocated. The next time
+ * we call select_dispatch, the realloc will be a
+ * no-op. */
+ return (-1);
+ }
+ sop->event_writeset_out = writeset_out;
+ sop->resize_out_sets = 0;
+ }
+
+ memcpy(sop->event_readset_out, sop->event_readset_in,
+ sop->event_fdsz);
+ memcpy(sop->event_writeset_out, sop->event_writeset_in,
+ sop->event_fdsz);
+
+ nfds = sop->event_fds+1;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = select(nfds, sop->event_readset_out,
+ sop->event_writeset_out, NULL, tv);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ check_selectop(sop);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("select");
+ return (-1);
+ }
+
+ return (0);
+ }
+
+ event_debug(("%s: select reports %d", __func__, res));
+
+ check_selectop(sop);
+ i = evutil_weakrand_range_(&base->weakrand_seed, nfds);
+ for (j = 0; j < nfds; ++j) {
+ if (++i >= nfds)
+ i = 0;
+ res = 0;
+ if (FD_ISSET(i, sop->event_readset_out))
+ res |= EV_READ;
+ if (FD_ISSET(i, sop->event_writeset_out))
+ res |= EV_WRITE;
+
+ if (res == 0)
+ continue;
+
+ evmap_io_active_(base, i, res);
+ }
+ check_selectop(sop);
+
+ return (0);
+}
+
+static int
+select_resize(struct selectop *sop, int fdsz)
+{
+ fd_set *readset_in = NULL;
+ fd_set *writeset_in = NULL;
+
+ if (sop->event_readset_in)
+ check_selectop(sop);
+
+ if ((readset_in = mm_realloc(sop->event_readset_in, fdsz)) == NULL)
+ goto error;
+ sop->event_readset_in = readset_in;
+ if ((writeset_in = mm_realloc(sop->event_writeset_in, fdsz)) == NULL) {
+ /* Note that this will leave event_readset_in expanded.
+ * That's okay; we wouldn't want to free it, since that would
+ * change the semantics of select_resize from "expand the
+ * readset_in and writeset_in, or return -1" to "expand the
+ * *set_in members, or trash them and return -1."
+ */
+ goto error;
+ }
+ sop->event_writeset_in = writeset_in;
+ sop->resize_out_sets = 1;
+
+ memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
+ fdsz - sop->event_fdsz);
+ memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
+ fdsz - sop->event_fdsz);
+
+ sop->event_fdsz = fdsz;
+ check_selectop(sop);
+
+ return (0);
+
+ error:
+ event_warn("malloc");
+ return (-1);
+}
+
+
+static int
+select_add(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct selectop *sop = base->evbase;
+ (void) p;
+
+ EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
+ check_selectop(sop);
+ /*
+ * Keep track of the highest fd, so that we can calculate the size
+ * of the fd_sets for select(2)
+ */
+ if (sop->event_fds < fd) {
+ int fdsz = sop->event_fdsz;
+
+ if (fdsz < (int)sizeof(fd_mask))
+ fdsz = (int)sizeof(fd_mask);
+
+ /* In theory we should worry about overflow here. In
+ * reality, though, the highest fd on a unixy system will
+ * not overflow here. XXXX */
+ while (fdsz < (int) SELECT_ALLOC_SIZE(fd + 1))
+ fdsz *= 2;
+
+ if (fdsz != sop->event_fdsz) {
+ if (select_resize(sop, fdsz)) {
+ check_selectop(sop);
+ return (-1);
+ }
+ }
+
+ sop->event_fds = fd;
+ }
+
+ if (events & EV_READ)
+ FD_SET(fd, sop->event_readset_in);
+ if (events & EV_WRITE)
+ FD_SET(fd, sop->event_writeset_in);
+ check_selectop(sop);
+
+ return (0);
+}
+
+/*
+ * Nothing to be done here.
+ */
+
+static int
+select_del(struct event_base *base, int fd, short old, short events, void *p)
+{
+ struct selectop *sop = base->evbase;
+ (void)p;
+
+ EVUTIL_ASSERT((events & EV_SIGNAL) == 0);
+ check_selectop(sop);
+
+ if (sop->event_fds < fd) {
+ check_selectop(sop);
+ return (0);
+ }
+
+ if (events & EV_READ)
+ FD_CLR(fd, sop->event_readset_in);
+
+ if (events & EV_WRITE)
+ FD_CLR(fd, sop->event_writeset_in);
+
+ check_selectop(sop);
+ return (0);
+}
+
+static void
+select_free_selectop(struct selectop *sop)
+{
+ if (sop->event_readset_in)
+ mm_free(sop->event_readset_in);
+ if (sop->event_writeset_in)
+ mm_free(sop->event_writeset_in);
+ if (sop->event_readset_out)
+ mm_free(sop->event_readset_out);
+ if (sop->event_writeset_out)
+ mm_free(sop->event_writeset_out);
+
+ memset(sop, 0, sizeof(struct selectop));
+ mm_free(sop);
+}
+
+static void
+select_dealloc(struct event_base *base)
+{
+ evsig_dealloc_(base);
+
+ select_free_selectop(base->evbase);
+}
+
+#endif /* EVENT__HAVE_SELECT */
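
To make the fd_set sizing in select.c easier to check, a small standalone sketch that mirrors the DIV_ROUNDUP and SELECT_ALLOC_SIZE macros defined above; the byte counts in the comments assume an 8-byte fd_mask, which is typical on 64-bit platforms but not guaranteed.

#include <stdio.h>

typedef unsigned long fd_mask;			/* the fallback type select.c uses */
#define NFDBITS (sizeof(fd_mask)*8)
#define DIV_ROUNDUP(x, y) (((x)+((y)-1))/(y))
#define SELECT_ALLOC_SIZE(n) (DIV_ROUNDUP(n, NFDBITS) * sizeof(fd_mask))

int
main(void)
{
	/* With an 8-byte fd_mask, one mask word covers 64 fds: fds 0..63 fit in
	 * 8 bytes, fd 64 pushes the allocation to 16 bytes, and so on.
	 * select_init() starts from SELECT_ALLOC_SIZE(32 + 1), and select_add()
	 * then doubles the size until it covers the highest fd seen. */
	printf("%zu %zu %zu %zu\n",
	    SELECT_ALLOC_SIZE(33),		/* 8, the initial allocation */
	    SELECT_ALLOC_SIZE(64),		/* 8 */
	    SELECT_ALLOC_SIZE(65),		/* 16 */
	    SELECT_ALLOC_SIZE(1025));		/* 136 */
	return 0;
}
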
diff --git a/libs/libevent/docs/test/Makefile.nmake b/libs/libevent/docs/test/Makefile.nmake
new file mode 100644
index 0000000000..30c3eb792b
--- /dev/null
+++ b/libs/libevent/docs/test/Makefile.nmake
@@ -0,0 +1,79 @@
+# WATCH OUT! This makefile is a work in progress. -*- makefile -*-
+
+!IFDEF OPENSSL_DIR
+SSL_CFLAGS=/I$(OPENSSL_DIR)\include /DEVENT__HAVE_OPENSSL
+SSL_OBJS=regress_ssl.obj
+SSL_LIBS=..\libevent_openssl.lib $(OPENSSL_DIR)\lib\libeay32.lib $(OPENSSL_DIR)\lib\ssleay32.lib gdi32.lib User32.lib
+!ELSE
+SSL_CFLAGS=
+SSL_OBJS=
+SSL_LIBS=
+!ENDIF
+
+CFLAGS=/I.. /I../WIN32-Code /I../WIN32-Code/nmake /I../include /I../compat /DHAVE_CONFIG_H /DTINYTEST_LOCAL $(SSL_CFLAGS)
+
+CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
+
+REGRESS_OBJS=regress.obj regress_buffer.obj regress_http.obj regress_dns.obj \
+ regress_testutils.obj \
+ regress_rpc.obj regress.gen.obj \
+ regress_et.obj regress_bufferevent.obj \
+ regress_listener.obj regress_util.obj tinytest.obj \
+ regress_main.obj regress_minheap.obj regress_iocp.obj \
+ regress_thread.obj regress_finalize.obj $(SSL_OBJS)
+
+OTHER_OBJS=test-init.obj test-eof.obj test-closed.obj test-weof.obj test-time.obj \
+ bench.obj bench_cascade.obj bench_http.obj bench_httpclient.obj \
+ test-changelist.obj \
+ print-winsock-errors.obj
+
+PROGRAMS=regress.exe \
+ test-init.exe test-eof.exe test-closed.exe test-weof.exe test-time.exe \
+ test-changelist.exe \
+ print-winsock-errors.exe
+
+# Disabled for now:
+# bench.exe bench_cascade.exe bench_http.exe bench_httpclient.exe
+
+
+LIBS=..\libevent.lib ws2_32.lib shell32.lib advapi32.lib
+
+all: $(PROGRAMS)
+
+regress.exe: $(REGRESS_OBJS)
+ $(CC) $(CFLAGS) $(LIBS) $(SSL_LIBS) $(REGRESS_OBJS)
+
+test-init.exe: test-init.obj
+ $(CC) $(CFLAGS) $(LIBS) test-init.obj
+test-eof.exe: test-eof.obj
+ $(CC) $(CFLAGS) $(LIBS) test-eof.obj
+test-closed.exe: test-closed.obj
+ $(CC) $(CFLAGS) $(LIBS) test-closed.obj
+test-changelist.exe: test-changelist.obj
+ $(CC) $(CFLAGS) $(LIBS) test-changelist.obj
+test-weof.exe: test-weof.obj
+ $(CC) $(CFLAGS) $(LIBS) test-weof.obj
+test-time.exe: test-time.obj
+ $(CC) $(CFLAGS) $(LIBS) test-time.obj
+
+print-winsock-errors.exe: print-winsock-errors.obj
+ $(CC) $(CFLAGS) $(LIBS) print-winsock-errors.obj
+
+bench.exe: bench.obj
+ $(CC) $(CFLAGS) $(LIBS) bench.obj
+bench_cascade.exe: bench_cascade.obj
+ $(CC) $(CFLAGS) $(LIBS) bench_cascade.obj
+bench_http.exe: bench_http.obj
+ $(CC) $(CFLAGS) $(LIBS) bench_http.obj
+bench_httpclient.exe: bench_httpclient.obj
+ $(CC) $(CFLAGS) $(LIBS) bench_httpclient.obj
+
+regress.gen.c regress.gen.h: regress.rpc ../event_rpcgen.py
+ echo // > regress.gen.c
+ echo #define NO_PYTHON_EXISTS > regress.gen.h
+ -python ..\event_rpcgen.py regress.rpc
+
+clean:
+ -del $(REGRESS_OBJS)
+ -del $(OTHER_OBJS)
+ -del $(PROGRAMS)
diff --git a/libs/libevent/docs/test/bench.c b/libs/libevent/docs/test/bench.c
new file mode 100644
index 0000000000..214479c1ff
--- /dev/null
+++ b/libs/libevent/docs/test/bench.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Mon 03/10/2003 - Modified by Davide Libenzi <davidel@xmailserver.org>
+ *
+ * Added chain event propagation to improve the sensitivity of the
+ * measurement with respect to the event loop efficiency.
+ *
+ *
+ */
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#else
+#include <sys/socket.h>
+#include <signal.h>
+#include <sys/resource.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#ifdef _WIN32
+#include <getopt.h>
+#endif
+
+#include <event.h>
+#include <evutil.h>
+
+static int count, writes, fired, failures;
+static evutil_socket_t *pipes;
+static int num_pipes, num_active, num_writes;
+static struct event *events;
+
+
+static void
+read_cb(evutil_socket_t fd, short which, void *arg)
+{
+ ev_intptr_t idx = (ev_intptr_t) arg, widx = idx + 1;
+ unsigned char ch;
+ ev_ssize_t n;
+
+ n = recv(fd, (char*)&ch, sizeof(ch), 0);
+ if (n >= 0)
+ count += n;
+ else
+ failures++;
+ if (writes) {
+ if (widx >= num_pipes)
+ widx -= num_pipes;
+ n = send(pipes[2 * widx + 1], "e", 1, 0);
+ if (n != 1)
+ failures++;
+ writes--;
+ fired++;
+ }
+}
+
+static struct timeval *
+run_once(void)
+{
+ evutil_socket_t *cp, space;
+ long i;
+ static struct timeval ts, te;
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+ if (event_initialized(&events[i]))
+ event_del(&events[i]);
+ event_set(&events[i], cp[0], EV_READ | EV_PERSIST, read_cb, (void *)(ev_intptr_t) i);
+ event_add(&events[i], NULL);
+ }
+
+ event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+
+ fired = 0;
+ space = num_pipes / num_active;
+ space = space * 2;
+ for (i = 0; i < num_active; i++, fired++)
+ (void) send(pipes[i * space + 1], "e", 1, 0);
+
+ count = 0;
+ writes = num_writes;
+ { int xcount = 0;
+ evutil_gettimeofday(&ts, NULL);
+ do {
+ event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+ xcount++;
+ } while (count != fired);
+ evutil_gettimeofday(&te, NULL);
+
+ if (xcount != count) fprintf(stderr, "Xcount: %d, Rcount: %d\n", xcount, count);
+ }
+
+ evutil_timersub(&te, &ts, &te);
+
+ return (&te);
+}
+
+int
+main(int argc, char **argv)
+{
+#ifdef HAVE_SETRLIMIT
+ struct rlimit rl;
+#endif
+ int i, c;
+ struct timeval *tv;
+ evutil_socket_t *cp;
+
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#endif
+ num_pipes = 100;
+ num_active = 1;
+ num_writes = num_pipes;
+ while ((c = getopt(argc, argv, "n:a:w:")) != -1) {
+ switch (c) {
+ case 'n':
+ num_pipes = atoi(optarg);
+ break;
+ case 'a':
+ num_active = atoi(optarg);
+ break;
+ case 'w':
+ num_writes = atoi(optarg);
+ break;
+ default:
+ fprintf(stderr, "Illegal argument \"%c\"\n", c);
+ exit(1);
+ }
+ }
+
+#ifdef HAVE_SETRLIMIT
+ rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50;
+ if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
+ perror("setrlimit");
+ exit(1);
+ }
+#endif
+
+ events = calloc(num_pipes, sizeof(struct event));
+ pipes = calloc(num_pipes * 2, sizeof(evutil_socket_t));
+ if (events == NULL || pipes == NULL) {
+ perror("malloc");
+ exit(1);
+ }
+
+ event_init();
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+#ifdef USE_PIPES
+ if (pipe(cp) == -1) {
+#else
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) {
+#endif
+ perror("pipe");
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < 25; i++) {
+ tv = run_once();
+ if (tv == NULL)
+ exit(1);
+ fprintf(stdout, "%ld\n",
+ tv->tv_sec * 1000000L + tv->tv_usec);
+ }
+
+ exit(0);
+}
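
A note on the kick-off indexing in run_once() above: space = num_pipes / num_active * 2 converts a stride over socket pairs into a stride over the flat pipes[] array (two descriptors per pair), so the initial writes land on evenly spaced pairs. The sketch below just prints which array slots receive those writes for the default num_pipes of 100; the chosen num_active values are illustrative.

#include <stdio.h>

int
main(void)
{
	/* Reproduces the kick-off indexing from run_once() in bench.c:
	 * space = num_pipes / num_active * 2, writes go to pipes[i*space + 1]. */
	int num_pipes = 100;
	int actives[] = { 1, 4 };
	int a, i;

	for (a = 0; a < 2; a++) {
		int num_active = actives[a];
		int space = num_pipes / num_active * 2;
		printf("num_active=%d:", num_active);
		for (i = 0; i < num_active; i++)
			printf(" pipes[%d]", i * space + 1);
		printf("\n");
	}
	return 0;
}
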
diff --git a/libs/libevent/docs/test/bench_cascade.c b/libs/libevent/docs/test/bench_cascade.c
new file mode 100644
index 0000000000..2d85cc1f10
--- /dev/null
+++ b/libs/libevent/docs/test/bench_cascade.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#else
+#include <sys/socket.h>
+#include <sys/resource.h>
+#endif
+#include <signal.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <getopt.h>
+#include <event.h>
+#include <evutil.h>
+
+/*
+ * This benchmark tests how quickly we can propagate a write down a chain
+ * of socket pairs. We start by writing to the first socket pair and all
+ * events will fire subsequently until the last socket pair has been reached
+ * and the benchmark terminates.
+ */
+
+static int fired;
+static evutil_socket_t *pipes;
+static struct event *events;
+
+static void
+read_cb(evutil_socket_t fd, short which, void *arg)
+{
+ char ch;
+ evutil_socket_t sock = (evutil_socket_t)(ev_intptr_t)arg;
+
+ (void) recv(fd, &ch, sizeof(ch), 0);
+ if (sock >= 0) {
+ if (send(sock, "e", 1, 0) < 0)
+ perror("send");
+ }
+ fired++;
+}
+
+static struct timeval *
+run_once(int num_pipes)
+{
+ int i;
+ evutil_socket_t *cp;
+ static struct timeval ts, te, tv_timeout;
+
+ events = (struct event *)calloc(num_pipes, sizeof(struct event));
+ pipes = (evutil_socket_t *)calloc(num_pipes * 2, sizeof(evutil_socket_t));
+
+ if (events == NULL || pipes == NULL) {
+ perror("malloc");
+ exit(1);
+ }
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) {
+ perror("socketpair");
+ exit(1);
+ }
+ }
+
+ /* measurement includes event setup */
+ evutil_gettimeofday(&ts, NULL);
+
+ /* provide a default timeout for events */
+ evutil_timerclear(&tv_timeout);
+ tv_timeout.tv_sec = 60;
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+ evutil_socket_t fd = i < num_pipes - 1 ? cp[3] : -1;
+ event_set(&events[i], cp[0], EV_READ, read_cb,
+ (void *)(ev_intptr_t)fd);
+ event_add(&events[i], &tv_timeout);
+ }
+
+ fired = 0;
+
+ /* kick everything off with a single write */
+ if (send(pipes[1], "e", 1, 0) < 0)
+ perror("send");
+
+ event_dispatch();
+
+ evutil_gettimeofday(&te, NULL);
+ evutil_timersub(&te, &ts, &te);
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+ event_del(&events[i]);
+ evutil_closesocket(cp[0]);
+ evutil_closesocket(cp[1]);
+ }
+
+ free(pipes);
+ free(events);
+
+ return (&te);
+}
+
+int
+main(int argc, char **argv)
+{
+#ifdef HAVE_SETRLIMIT
+ struct rlimit rl;
+#endif
+ int i, c;
+ struct timeval *tv;
+
+ int num_pipes = 100;
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#endif
+
+ while ((c = getopt(argc, argv, "n:")) != -1) {
+ switch (c) {
+ case 'n':
+ num_pipes = atoi(optarg);
+ break;
+ default:
+ fprintf(stderr, "Illegal argument \"%c\"\n", c);
+ exit(1);
+ }
+ }
+
+#ifdef HAVE_SETRLIMIT
+ rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50;
+ if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
+ perror("setrlimit");
+ exit(1);
+ }
+#endif
+
+ event_init();
+
+ for (i = 0; i < 25; i++) {
+ tv = run_once(num_pipes);
+ if (tv == NULL)
+ exit(1);
+ fprintf(stdout, "%ld\n",
+ tv->tv_sec * 1000000L + tv->tv_usec);
+ }
+
+#ifdef _WIN32
+ WSACleanup();
+#endif
+
+ exit(0);
+}
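
The chain wiring in run_once() above can be hard to see from the pointer arithmetic alone: for pair i, cp points at pipes[2*i], so cp[0] is that pair's read end and cp[3] is the write end of pair i+1. The sketch below only prints that mapping for a small illustrative chain.

#include <stdio.h>

int
main(void)
{
	/* Mirrors the event setup in run_once() of bench_cascade.c: pair i
	 * reads on pipes[2*i] and forwards to pipes[2*i + 3], the write end
	 * of pair i+1; the last pair gets -1 and forwards nothing. */
	int num_pipes = 4, i;

	for (i = 0; i < num_pipes; i++) {
		int forward = (i < num_pipes - 1) ? 2 * i + 3 : -1;
		if (forward >= 0)
			printf("pair %d: reads pipes[%d], forwards to pipes[%d]\n",
			    i, 2 * i, forward);
		else
			printf("pair %d: reads pipes[%d], end of chain\n", i, 2 * i);
	}
	return 0;
}
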
diff --git a/libs/libevent/docs/test/bench_http.c b/libs/libevent/docs/test/bench_http.c
new file mode 100644
index 0000000000..6d0d971799
--- /dev/null
+++ b/libs/libevent/docs/test/bench_http.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2008-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef _WIN32
+#include <winsock2.h>
+#else
+#include <sys/socket.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/util.h"
+#include "event2/http.h"
+#include "event2/thread.h"
+
+static void http_basic_cb(struct evhttp_request *req, void *arg);
+
+static char *content;
+static size_t content_len = 0;
+
+static void
+http_basic_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+
+ evbuffer_add(evb, content, content_len);
+
+ /* allow sending of an empty reply */
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+ evbuffer_free(evb);
+}
+
+#if LIBEVENT_VERSION_NUMBER >= 0x02000200
+static void
+http_ref_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+
+ evbuffer_add_reference(evb, content, content_len, NULL, NULL);
+
+ /* allow sending of an empty reply */
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+ evbuffer_free(evb);
+}
+#endif
+
+int
+main(int argc, char **argv)
+{
+ struct event_config *cfg = event_config_new();
+ struct event_base *base;
+ struct evhttp *http;
+ int i;
+ int c;
+ int use_iocp = 0;
+ unsigned short port = 8080;
+ char *endptr = NULL;
+
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#else
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return (1);
+#endif
+
+ for (i = 1; i < argc; ++i) {
+ if (*argv[i] != '-')
+ continue;
+
+ c = argv[i][1];
+
+ if ((c == 'p' || c == 'l') && i + 1 >= argc) {
+ fprintf(stderr, "-%c requires argument.\n", c);
+ exit(1);
+ }
+
+ switch (c) {
+ case 'p':
+ if (i+1 >= argc || !argv[i+1]) {
+ fprintf(stderr, "Missing port\n");
+ exit(1);
+ }
+ port = (int)strtol(argv[i+1], &endptr, 10);
+ if (*endptr != '\0') {
+ fprintf(stderr, "Bad port\n");
+ exit(1);
+ }
+ break;
+ case 'l':
+ if (i+1 >= argc || !argv[i+1]) {
+ fprintf(stderr, "Missing content length\n");
+ exit(1);
+ }
+ content_len = (size_t)strtol(argv[i+1], &endptr, 10);
+ if (*endptr != '\0' || content_len == 0) {
+ fprintf(stderr, "Bad content length\n");
+ exit(1);
+ }
+ break;
+#ifdef _WIN32
+ case 'i':
+ use_iocp = 1;
+ evthread_use_windows_threads();
+ event_config_set_flag(cfg,EVENT_BASE_FLAG_STARTUP_IOCP);
+ break;
+#endif
+ default:
+ fprintf(stderr, "Illegal argument \"%c\"\n", c);
+ exit(1);
+ }
+ }
+
+ base = event_base_new_with_config(cfg);
+ if (!base) {
+ fprintf(stderr, "creating event_base failed. Exiting.\n");
+ return 1;
+ }
+
+ http = evhttp_new(base);
+
+ content = malloc(content_len);
+ if (content == NULL) {
+ fprintf(stderr, "Cannot allocate content\n");
+ exit(1);
+ } else {
+ int i = 0;
+ for (i = 0; i < (int)content_len; ++i)
+ content[i] = (i & 255);
+ }
+
+ evhttp_set_cb(http, "/ind", http_basic_cb, NULL);
+ fprintf(stderr, "/ind - basic content (memory copy)\n");
+
+ evhttp_set_cb(http, "/ref", http_ref_cb, NULL);
+ fprintf(stderr, "/ref - basic content (reference)\n");
+
+ fprintf(stderr, "Serving %d bytes on port %d using %s\n",
+ (int)content_len, port,
+ use_iocp? "IOCP" : event_base_get_method(base));
+
+ evhttp_bind_socket(http, "0.0.0.0", port);
+
+#ifdef _WIN32
+ if (use_iocp) {
+ struct timeval tv={99999999,0};
+ event_base_loopexit(base, &tv);
+ }
+#endif
+ event_base_dispatch(base);
+
+#ifdef _WIN32
+ WSACleanup();
+#endif
+
+ /* NOTREACHED */
+ return (0);
+}
diff --git a/libs/libevent/docs/test/bench_httpclient.c b/libs/libevent/docs/test/bench_httpclient.c
new file mode 100644
index 0000000000..bcddc95f43
--- /dev/null
+++ b/libs/libevent/docs/test/bench_httpclient.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* for EVUTIL_ERR_CONNECT_RETRIABLE macro */
+#include "util-internal.h"
+
+#include <sys/types.h>
+#ifdef _WIN32
+#include <winsock2.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+# ifdef _XOPEN_SOURCE_EXTENDED
+# include <arpa/inet.h>
+# endif
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/util.h"
+
+const char *resource = NULL;
+struct event_base *base = NULL;
+
+int total_n_handled = 0;
+int total_n_errors = 0;
+int total_n_launched = 0;
+size_t total_n_bytes = 0;
+struct timeval total_time = {0,0};
+int n_errors = 0;
+
+const int PARALLELISM = 200;
+const int N_REQUESTS = 20000;
+
+struct request_info {
+ size_t n_read;
+ struct timeval started;
+};
+
+static int launch_request(void);
+static void readcb(struct bufferevent *b, void *arg);
+static void errorcb(struct bufferevent *b, short what, void *arg);
+
+static void
+readcb(struct bufferevent *b, void *arg)
+{
+ struct request_info *ri = arg;
+ struct evbuffer *input = bufferevent_get_input(b);
+ size_t n = evbuffer_get_length(input);
+
+ ri->n_read += n;
+ evbuffer_drain(input, n);
+}
+
+static void
+errorcb(struct bufferevent *b, short what, void *arg)
+{
+ struct request_info *ri = arg;
+ struct timeval now, diff;
+ if (what & BEV_EVENT_EOF) {
+ ++total_n_handled;
+ total_n_bytes += ri->n_read;
+ evutil_gettimeofday(&now, NULL);
+ evutil_timersub(&now, &ri->started, &diff);
+ evutil_timeradd(&diff, &total_time, &total_time);
+
+ if (total_n_handled && (total_n_handled%1000)==0)
+ printf("%d requests done\n",total_n_handled);
+
+ if (total_n_launched < N_REQUESTS) {
+ if (launch_request() < 0)
+ perror("Can't launch");
+ }
+ } else {
+ ++total_n_errors;
+ perror("Unexpected error");
+ }
+
+ bufferevent_setcb(b, NULL, NULL, NULL, NULL);
+ free(ri);
+ bufferevent_disable(b, EV_READ|EV_WRITE);
+ bufferevent_free(b);
+}
+
+static void
+frob_socket(evutil_socket_t sock)
+{
+#ifdef HAVE_SO_LINGER
+ struct linger l;
+#endif
+ int one = 1;
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*)&one, sizeof(one))<0)
+ perror("setsockopt(SO_REUSEADDR)");
+#ifdef HAVE_SO_LINGER
+ l.l_onoff = 1;
+ l.l_linger = 0;
+ if (setsockopt(sock, SOL_SOCKET, SO_LINGER, (void*)&l, sizeof(l))<0)
+ perror("setsockopt(SO_LINGER)");
+#endif
+}
+
+static int
+launch_request(void)
+{
+ evutil_socket_t sock;
+ struct sockaddr_in sin;
+ struct bufferevent *b;
+
+ struct request_info *ri;
+
+ memset(&sin, 0, sizeof(sin));
+
+ ++total_n_launched;
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(0x7f000001);
+ sin.sin_port = htons(8080);
+ if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0)
+ return -1;
+ if (evutil_make_socket_nonblocking(sock) < 0) {
+ evutil_closesocket(sock);
+ return -1;
+ }
+ frob_socket(sock);
+ if (connect(sock, (struct sockaddr*)&sin, sizeof(sin)) < 0) {
+ int e = evutil_socket_geterror(sock);
+ if (! EVUTIL_ERR_CONNECT_RETRIABLE(e)) {
+ evutil_closesocket(sock);
+ return -1;
+ }
+ }
+
+ ri = malloc(sizeof(*ri));
+ ri->n_read = 0;
+ evutil_gettimeofday(&ri->started, NULL);
+
+ b = bufferevent_socket_new(base, sock, BEV_OPT_CLOSE_ON_FREE);
+
+ bufferevent_setcb(b, readcb, NULL, errorcb, ri);
+ bufferevent_enable(b, EV_READ|EV_WRITE);
+
+ evbuffer_add_printf(bufferevent_get_output(b),
+ "GET %s HTTP/1.0\r\n\r\n", resource);
+
+ return 0;
+}
+
+
+int
+main(int argc, char **argv)
+{
+ int i;
+ struct timeval start, end, total;
+ long long usec;
+ double throughput;
+ resource = "/ref";
+
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#endif
+
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ base = event_base_new();
+
+ for (i=0; i < PARALLELISM; ++i) {
+ if (launch_request() < 0)
+ perror("launch");
+ }
+
+ evutil_gettimeofday(&start, NULL);
+
+ event_base_dispatch(base);
+
+ evutil_gettimeofday(&end, NULL);
+ evutil_timersub(&end, &start, &total);
+ usec = total_time.tv_sec * (long long)1000000 + total_time.tv_usec;
+
+ if (!total_n_handled) {
+ puts("Nothing worked. You probably did something dumb.");
+ return 0;
+ }
+
+
+ throughput = total_n_handled /
+ (total.tv_sec+ ((double)total.tv_usec)/1000000.0);
+
+#ifdef _WIN32
+#define I64_FMT "%I64d"
+#define I64_TYP __int64
+#else
+#define I64_FMT "%lld"
+#define I64_TYP long long int
+#endif
+
+ printf("\n%d requests in %d.%06d sec. (%.2f throughput)\n"
+ "Each took about %.02f msec latency\n"
+ I64_FMT " bytes read. %d errors.\n",
+ total_n_handled,
+ (int)total.tv_sec, (int)total.tv_usec,
+ throughput,
+ (double)(usec/1000) / total_n_handled,
+ (I64_TYP)total_n_bytes, n_errors);
+
+#ifdef _WIN32
+ WSACleanup();
+#endif
+
+ return 0;
+}
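
The summary printed at the end combines two different clocks: throughput uses the wall-clock time of the whole run (total), while the latency figure is the mean of the per-request times accumulated into total_time by errorcb(). The sketch below reproduces that arithmetic with purely illustrative numbers.

#include <stdio.h>

int
main(void)
{
	/* Illustrative numbers only: how bench_httpclient.c's summary line is
	 * derived.  `wall_sec` stands in for the wall-clock duration `total`;
	 * `summed_usec` stands in for `total_time`, the sum of per-request
	 * times accumulated in errorcb(). */
	int handled = 20000;
	double wall_sec = 10.5;			/* total.tv_sec + total.tv_usec/1e6 */
	long long summed_usec = 40000000;	/* 40 seconds of summed request time */

	printf("throughput: %.2f req/s\n", handled / wall_sec);	/* ~1904.76 */
	printf("mean latency: %.2f msec\n",
	    (double)(summed_usec / 1000) / handled);			/* 2.00 */
	return 0;
}
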
diff --git a/libs/libevent/docs/test/check-dumpevents.py b/libs/libevent/docs/test/check-dumpevents.py
new file mode 100644
index 0000000000..16fe9bc92f
--- /dev/null
+++ b/libs/libevent/docs/test/check-dumpevents.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python2
+#
+# Post-process the output of test-dumpevents and check it for correctness.
+#
+
+import math
+import re
+import sys
+
+text = sys.stdin.readlines()
+
+try:
+ expect_inserted_pos = text.index("Inserted:\n")
+ expect_active_pos = text.index("Active:\n")
+ got_inserted_pos = text.index("Inserted events:\n")
+ got_active_pos = text.index("Active events:\n")
+except ValueError:
+ print >>sys.stderr, "Missing expected dividing line in dumpevents output"
+ sys.exit(1)
+
+if not (expect_inserted_pos < expect_active_pos <
+ got_inserted_pos < got_active_pos):
+ print >>sys.stderr, "Sections out of order in dumpevents output"
+ sys.exit(1)
+
+now,T= text[1].split()
+T = float(T)
+
+want_inserted = set(text[expect_inserted_pos+1:expect_active_pos])
+want_active = set(text[expect_active_pos+1:got_inserted_pos-1])
+got_inserted = set(text[got_inserted_pos+1:got_active_pos])
+got_active = set(text[got_active_pos+1:])
+
+pat = re.compile(r'Timeout=([0-9\.]+)')
+def replace_time(m):
+ t = float(m.group(1))
+ if .9 < abs(t-T) < 1.1:
+ return "Timeout=T+1"
+ elif 2.4 < abs(t-T) < 2.6:
+ return "Timeout=T+2.5"
+ else:
+ return m.group(0)
+
+cleaned_inserted = set( pat.sub(replace_time, s) for s in got_inserted
+ if "Internal" not in s)
+
+if cleaned_inserted != want_inserted:
+ print >>sys.stderr, "Inserted event lists were not as expected!"
+ sys.exit(1)
+
+if set(got_active) != set(want_active):
+ print >>sys.stderr, "Active event lists were not as expected!"
+ sys.exit(1)
+
diff --git a/libs/libevent/docs/test/include.am b/libs/libevent/docs/test/include.am
new file mode 100644
index 0000000000..4cd49ef630
--- /dev/null
+++ b/libs/libevent/docs/test/include.am
@@ -0,0 +1,146 @@
+# test/Makefile.am for libevent
+# Copyright 2000-2007 Niels Provos
+# Copyright 2007-2012 Niels Provos and Nick Mathewson
+#
+# See LICENSE for copying information.
+
+regress_CPPFLAGS = -DTINYTEST_LOCAL
+
+EXTRA_DIST+= \
+ test/check-dumpevents.py \
+ test/regress.gen.c \
+ test/regress.gen.h \
+ test/regress.rpc \
+ test/rpcgen_wrapper.sh \
+ test/test.sh
+
+TESTPROGRAMS = \
+ test/bench \
+ test/bench_cascade \
+ test/bench_http \
+ test/bench_httpclient \
+ test/test-changelist \
+ test/test-dumpevents \
+ test/test-eof \
+ test/test-closed \
+ test/test-fdleak \
+ test/test-init \
+ test/test-ratelim \
+ test/test-time \
+ test/test-weof \
+ test/regress
+
+if BUILD_REGRESS
+noinst_PROGRAMS += $(TESTPROGRAMS)
+EXTRA_PROGRAMS+= test/regress
+endif
+
+noinst_HEADERS+= \
+ test/regress.h \
+ test/regress_thread.h \
+ test/tinytest.h \
+ test/tinytest_local.h \
+ test/tinytest_macros.h
+
+# We need to copy this file, since automake doesn't want us to use top_srcdir
+# in TESTS.
+TESTS = test/test-script.sh
+
+test/test-script.sh: test/test.sh
+ cp $(top_srcdir)/test/test.sh $@
+
+DISTCLEANFILES += test/test-script.sh
+DISTCLEANFILES += test/regress.gen.c test/regress.gen.h
+
+if BUILD_REGRESS
+BUILT_SOURCES += test/regress.gen.c test/regress.gen.h
+endif
+
+test_test_init_SOURCES = test/test-init.c
+test_test_init_LDADD = libevent_core.la
+test_test_dumpevents_SOURCES = test/test-dumpevents.c
+test_test_dumpevents_LDADD = libevent_core.la
+test_test_eof_SOURCES = test/test-eof.c
+test_test_eof_LDADD = libevent_core.la
+test_test_closed_SOURCES = test/test-closed.c
+test_test_closed_LDADD = libevent_core.la
+test_test_changelist_SOURCES = test/test-changelist.c
+test_test_changelist_LDADD = libevent_core.la
+test_test_weof_SOURCES = test/test-weof.c
+test_test_weof_LDADD = libevent_core.la
+test_test_time_SOURCES = test/test-time.c
+test_test_time_LDADD = libevent_core.la
+test_test_ratelim_SOURCES = test/test-ratelim.c
+test_test_ratelim_LDADD = libevent_core.la -lm
+test_test_fdleak_SOURCES = test/test-fdleak.c
+test_test_fdleak_LDADD = libevent_core.la
+
+test_regress_SOURCES = \
+ test/regress.c \
+ test/regress.gen.c \
+ test/regress.gen.h \
+ test/regress_buffer.c \
+ test/regress_bufferevent.c \
+ test/regress_dns.c \
+ test/regress_et.c \
+ test/regress_finalize.c \
+ test/regress_http.c \
+ test/regress_listener.c \
+ test/regress_main.c \
+ test/regress_minheap.c \
+ test/regress_rpc.c \
+ test/regress_testutils.c \
+ test/regress_testutils.h \
+ test/regress_util.c \
+ test/tinytest.c \
+ $(regress_thread_SOURCES) \
+ $(regress_zlib_SOURCES)
+
+if PTHREADS
+regress_thread_SOURCES = test/regress_thread.c
+PTHREAD_LIBS += libevent_pthreads.la
+endif
+if BUILD_WIN32
+regress_thread_SOURCES = test/regress_thread.c
+endif
+if ZLIB_REGRESS
+regress_zlib_SOURCES = test/regress_zlib.c
+endif
+if BUILD_WIN32
+test_regress_SOURCES += test/regress_iocp.c
+endif
+
+test_regress_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la $(PTHREAD_LIBS) $(ZLIB_LIBS)
+test_regress_CPPFLAGS = $(AM_CPPFLAGS) $(PTHREAD_CFLAGS) $(ZLIB_CFLAGS) -Itest
+test_regress_LDFLAGS = $(PTHREAD_CFLAGS)
+
+if OPENSSL
+test_regress_SOURCES += test/regress_ssl.c
+test_regress_CPPFLAGS += $(OPENSSL_INCS)
+test_regress_LDADD += libevent_openssl.la $(OPENSSL_LIBS) ${OPENSSL_LIBADD}
+endif
+
+test_bench_SOURCES = test/bench.c
+test_bench_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+test_bench_cascade_SOURCES = test/bench_cascade.c
+test_bench_cascade_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+test_bench_http_SOURCES = test/bench_http.c
+test_bench_http_LDADD = $(LIBEVENT_GC_SECTIONS) libevent.la
+test_bench_httpclient_SOURCES = test/bench_httpclient.c
+test_bench_httpclient_LDADD = $(LIBEVENT_GC_SECTIONS) libevent_core.la
+
+test/regress.gen.c test/regress.gen.h: test/rpcgen-attempted
+
+test/rpcgen-attempted: test/regress.rpc event_rpcgen.py test/rpcgen_wrapper.sh
+ $(AM_V_GEN)date -u > $@
+ $(AM_V_at)if $(srcdir)/test/rpcgen_wrapper.sh $(srcdir)/test; then \
+ true; \
+ else \
+ echo "No Python installed; stubbing out RPC test." >&2; \
+ echo " "> test/regress.gen.c; \
+ echo "#define NO_PYTHON_EXISTS" > test/regress.gen.h; \
+ fi
+
+CLEANFILES += test/rpcgen-attempted
+
+$(TESTPROGRAMS) : libevent.la
diff --git a/libs/libevent/docs/test/print-winsock-errors.c b/libs/libevent/docs/test/print-winsock-errors.c
new file mode 100644
index 0000000000..ab6e610e84
--- /dev/null
+++ b/libs/libevent/docs/test/print-winsock-errors.c
@@ -0,0 +1,84 @@
+#include <winsock2.h>
+#include <windows.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "event2/event.h"
+#include "event2/util.h"
+#include "event2/thread.h"
+
+#define E(x) printf (#x " -> \"%s\"\n", evutil_socket_error_to_string (x));
+
+int main (int argc, char **argv)
+{
+ int i, j;
+ const char *s1, *s2;
+
+ evthread_use_windows_threads ();
+
+ s1 = evutil_socket_error_to_string (WSAEINTR);
+
+ for (i = 0; i < 3; i++) {
+ printf ("\niteration %d:\n\n", i);
+ E(WSAEINTR);
+ E(WSAEACCES);
+ E(WSAEFAULT);
+ E(WSAEINVAL);
+ E(WSAEMFILE);
+ E(WSAEWOULDBLOCK);
+ E(WSAEINPROGRESS);
+ E(WSAEALREADY);
+ E(WSAENOTSOCK);
+ E(WSAEDESTADDRREQ);
+ E(WSAEMSGSIZE);
+ E(WSAEPROTOTYPE);
+ E(WSAENOPROTOOPT);
+ E(WSAEPROTONOSUPPORT);
+ E(WSAESOCKTNOSUPPORT);
+ E(WSAEOPNOTSUPP);
+ E(WSAEPFNOSUPPORT);
+ E(WSAEAFNOSUPPORT);
+ E(WSAEADDRINUSE);
+ E(WSAEADDRNOTAVAIL);
+ E(WSAENETDOWN);
+ E(WSAENETUNREACH);
+ E(WSAENETRESET);
+ E(WSAECONNABORTED);
+ E(WSAECONNRESET);
+ E(WSAENOBUFS);
+ E(WSAEISCONN);
+ E(WSAENOTCONN);
+ E(WSAESHUTDOWN);
+ E(WSAETIMEDOUT);
+ E(WSAECONNREFUSED);
+ E(WSAEHOSTDOWN);
+ E(WSAEHOSTUNREACH);
+ E(WSAEPROCLIM);
+ E(WSASYSNOTREADY);
+ E(WSAVERNOTSUPPORTED);
+ E(WSANOTINITIALISED);
+ E(WSAEDISCON);
+ E(WSATYPE_NOT_FOUND);
+ E(WSAHOST_NOT_FOUND);
+ E(WSATRY_AGAIN);
+ E(WSANO_RECOVERY);
+ E(WSANO_DATA);
+ E(0xdeadbeef); /* test the case where no message is available */
+
+ /* fill up the hash table a bit to make sure it grows properly */
+ for (j = 0; j < 50; j++) {
+ int err;
+ evutil_secure_rng_get_bytes(&err, sizeof(err));
+ evutil_socket_error_to_string(err);
+ }
+ }
+
+ s2 = evutil_socket_error_to_string (WSAEINTR);
+ if (s1 != s2)
+ printf ("caching failed!\n");
+
+ libevent_global_shutdown ();
+
+ return EXIT_SUCCESS;
+}
diff --git a/libs/libevent/docs/test/regress.c b/libs/libevent/docs/test/regress.c
new file mode 100644
index 0000000000..b12c66dfa0
--- /dev/null
+++ b/libs/libevent/docs/test/regress.c
@@ -0,0 +1,3401 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef EVENT__HAVE_PTHREADS
+#include <pthread.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <ctype.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/event_compat.h"
+#include "event2/tag.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/util.h"
+#include "event-internal.h"
+#include "evthread-internal.h"
+#include "log-internal.h"
+#include "time-internal.h"
+
+#include "regress.h"
+
+#ifndef _WIN32
+#include "regress.gen.h"
+#endif
+
+evutil_socket_t pair[2];
+int test_ok;
+int called;
+struct event_base *global_base;
+
+static char wbuf[4096];
+static char rbuf[4096];
+static int woff;
+static int roff;
+static int usepersist;
+static struct timeval tset;
+static struct timeval tcalled;
+
+
+#define TEST1 "this is a test"
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+#ifdef _WIN32
+#define write(fd,buf,len) send((fd),(buf),(int)(len),0)
+#define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
+#endif
+
+struct basic_cb_args
+{
+ struct event_base *eb;
+ struct event *ev;
+ unsigned int callcount;
+};
+
+static void
+simple_read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+
+ if (len) {
+ if (!called) {
+ if (event_add(arg, NULL) == -1)
+ exit(1);
+ }
+ } else if (called == 1)
+ test_ok = 1;
+
+ called++;
+}
+
+static void
+basic_read_cb(evutil_socket_t fd, short event, void *data)
+{
+ char buf[256];
+ int len;
+ struct basic_cb_args *arg = data;
+
+ len = read(fd, buf, sizeof(buf));
+
+ if (len < 0) {
+ tt_fail_perror("read (callback)");
+ } else {
+ switch (arg->callcount++) {
+ case 0: /* first call: expect to read data; cycle */
+ if (len > 0)
+ return;
+
+ tt_fail_msg("EOF before data read");
+ break;
+
+ case 1: /* second call: expect EOF; stop */
+ if (len > 0)
+ tt_fail_msg("not all data read on first cycle");
+ break;
+
+ default: /* third call: should not happen */
+ tt_fail_msg("too many cycles");
+ }
+ }
+
+ event_del(arg->ev);
+ event_base_loopexit(arg->eb, NULL);
+}
+
+static void
+dummy_read_cb(evutil_socket_t fd, short event, void *arg)
+{
+}
+
+static void
+simple_write_cb(evutil_socket_t fd, short event, void *arg)
+{
+ int len;
+
+ len = write(fd, TEST1, strlen(TEST1) + 1);
+ if (len == -1)
+ test_ok = 0;
+ else
+ test_ok = 1;
+}
+
+static void
+multiple_write_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ int len;
+
+ len = 128;
+ if (woff + len >= (int)sizeof(wbuf))
+ len = sizeof(wbuf) - woff;
+
+ len = write(fd, wbuf + woff, len);
+ if (len == -1) {
+ fprintf(stderr, "%s: write\n", __func__);
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ woff += len;
+
+ if (woff >= (int)sizeof(wbuf)) {
+ shutdown(fd, SHUT_WR);
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ if (!usepersist) {
+ if (event_add(ev, NULL) == -1)
+ exit(1);
+ }
+}
+
+static void
+multiple_read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ int len;
+
+ len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
+ if (len == -1)
+ fprintf(stderr, "%s: read\n", __func__);
+ if (len <= 0) {
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ roff += len;
+ if (!usepersist) {
+ if (event_add(ev, NULL) == -1)
+ exit(1);
+ }
+}
+
+static void
+timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ evutil_gettimeofday(&tcalled, NULL);
+}
+
+struct both {
+ struct event ev;
+ int nread;
+};
+
+static void
+combined_read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct both *both = arg;
+ char buf[128];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+ if (len == -1)
+ fprintf(stderr, "%s: read\n", __func__);
+ if (len <= 0)
+ return;
+
+ both->nread += len;
+ if (event_add(&both->ev, NULL) == -1)
+ exit(1);
+}
+
+static void
+combined_write_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct both *both = arg;
+ char buf[128];
+ int len;
+
+ len = sizeof(buf);
+ if (len > both->nread)
+ len = both->nread;
+
+ memset(buf, 'q', len);
+
+ len = write(fd, buf, len);
+ if (len == -1)
+ fprintf(stderr, "%s: write\n", __func__);
+ if (len <= 0) {
+ shutdown(fd, SHUT_WR);
+ return;
+ }
+
+ both->nread -= len;
+ if (event_add(&both->ev, NULL) == -1)
+ exit(1);
+}
+
+/* These macros used to replicate the work of the legacy test wrapper code */
+#define setup_test(x) do { \
+ if (!in_legacy_test_wrapper) { \
+ TT_FAIL(("Legacy test %s not wrapped properly", x)); \
+ return; \
+ } \
+ } while (0)
+#define cleanup_test() setup_test("cleanup")
+
+static void
+test_simpleread(void)
+{
+ struct event ev;
+
+ /* Very simple read test */
+ setup_test("Simple read: ");
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ shutdown(pair[0], SHUT_WR);
+
+ event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+static void
+test_simplewrite(void)
+{
+ struct event ev;
+
+ /* Very simple write test */
+ setup_test("Simple write: ");
+
+ event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+static void
+simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
+{
+ if (++called == 2)
+ test_ok = 1;
+}
+
+static void
+test_simpleread_multiple(void)
+{
+ struct event one, two;
+
+ /* Very simple read test */
+ setup_test("Simple read to multiple events: ");
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ shutdown(pair[0], SHUT_WR);
+
+ event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
+ if (event_add(&one, NULL) == -1)
+ exit(1);
+ event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
+ if (event_add(&two, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+static int have_closed = 0;
+static int premature_event = 0;
+static void
+simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
+{
+ evutil_socket_t **fds = ptr;
+ TT_BLATHER(("Closing"));
+ evutil_closesocket(*fds[0]);
+ evutil_closesocket(*fds[1]);
+ *fds[0] = -1;
+ *fds[1] = -1;
+ have_closed = 1;
+}
+
+static void
+record_event_cb(evutil_socket_t s, short what, void *ptr)
+{
+ short *whatp = ptr;
+ if (!have_closed)
+ premature_event = 1;
+ *whatp = what;
+ TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
+}
+
+static void
+test_simpleclose(void *ptr)
+{
+ /* Test that closing an fd is detected both as a read and as a write. */
+ struct event_base *base = event_base_new();
+ evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
+ evutil_socket_t *to_close[2];
+ struct event *rev=NULL, *wev=NULL, *closeev=NULL;
+ struct timeval tv;
+ short got_read_on_close = 0, got_write_on_close = 0;
+ char buf[1024];
+ memset(buf, 99, sizeof(buf));
+#ifdef _WIN32
+#define LOCAL_SOCKETPAIR_AF AF_INET
+#else
+#define LOCAL_SOCKETPAIR_AF AF_UNIX
+#endif
+ if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
+ TT_DIE(("socketpair: %s", strerror(errno)));
+ if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
+ TT_DIE(("socketpair: %s", strerror(errno)));
+ if (evutil_make_socket_nonblocking(pair1[1]) < 0)
+ TT_DIE(("make_socket_nonblocking"));
+ if (evutil_make_socket_nonblocking(pair2[1]) < 0)
+ TT_DIE(("make_socket_nonblocking"));
+
+ /** Stuff pair2[1] full of data, until write fails */
+ while (1) {
+ int r = write(pair2[1], buf, sizeof(buf));
+ if (r<0) {
+ int err = evutil_socket_geterror(pair2[1]);
+ if (! EVUTIL_ERR_RW_RETRIABLE(err))
+ TT_DIE(("write failed strangely: %s",
+ evutil_socket_error_to_string(err)));
+ break;
+ }
+ }
+ to_close[0] = &pair1[0];
+ to_close[1] = &pair2[0];
+
+ closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
+ to_close);
+ rev = event_new(base, pair1[1], EV_READ, record_event_cb,
+ &got_read_on_close);
+ TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
+ wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
+ &got_write_on_close);
+ TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
+ tv.tv_sec = 0;
+ tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
+ * sure we get a read event. */
+ event_add(closeev, &tv);
+ event_add(rev, NULL);
+ event_add(wev, NULL);
+ /* Don't let the test go on too long. */
+ tv.tv_sec = 0;
+ tv.tv_usec = 200*1000;
+ event_base_loopexit(base, &tv);
+ event_base_loop(base, 0);
+
+ tt_int_op(got_read_on_close, ==, EV_READ);
+ tt_int_op(got_write_on_close, ==, EV_WRITE);
+ tt_int_op(premature_event, ==, 0);
+
+end:
+ if (pair1[0] >= 0)
+ evutil_closesocket(pair1[0]);
+ if (pair1[1] >= 0)
+ evutil_closesocket(pair1[1]);
+ if (pair2[0] >= 0)
+ evutil_closesocket(pair2[0]);
+ if (pair2[1] >= 0)
+ evutil_closesocket(pair2[1]);
+ if (rev)
+ event_free(rev);
+ if (wev)
+ event_free(wev);
+ if (closeev)
+ event_free(closeev);
+ if (base)
+ event_base_free(base);
+}
+
+
+static void
+test_multiple(void)
+{
+ struct event ev, ev2;
+ int i;
+
+ /* Multiple read and write test */
+ setup_test("Multiple read/write: ");
+ memset(rbuf, 0, sizeof(rbuf));
+ for (i = 0; i < (int)sizeof(wbuf); i++)
+ wbuf[i] = i;
+
+ roff = woff = 0;
+ usepersist = 0;
+
+ event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
+ if (event_add(&ev2, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ if (roff == woff)
+ test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+ cleanup_test();
+}
+
+static void
+test_persistent(void)
+{
+ struct event ev, ev2;
+ int i;
+
+ /* Multiple read and write test with persist */
+ setup_test("Persist read/write: ");
+ memset(rbuf, 0, sizeof(rbuf));
+ for (i = 0; i < (int)sizeof(wbuf); i++)
+ wbuf[i] = i;
+
+ roff = woff = 0;
+ usepersist = 1;
+
+ event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
+ if (event_add(&ev2, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ if (roff == woff)
+ test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+ cleanup_test();
+}
+
+static void
+test_combined(void)
+{
+ struct both r1, r2, w1, w2;
+
+ setup_test("Combined read/write: ");
+ memset(&r1, 0, sizeof(r1));
+ memset(&r2, 0, sizeof(r2));
+ memset(&w1, 0, sizeof(w1));
+ memset(&w2, 0, sizeof(w2));
+
+ w1.nread = 4096;
+ w2.nread = 8192;
+
+ event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
+ event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
+ event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
+ event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
+ tt_assert(event_add(&r1.ev, NULL) != -1);
+ tt_assert(!event_add(&w1.ev, NULL));
+ tt_assert(!event_add(&r2.ev, NULL));
+ tt_assert(!event_add(&w2.ev, NULL));
+ event_dispatch();
+
+ if (r1.nread == 8192 && r2.nread == 4096)
+ test_ok = 1;
+
+end:
+ cleanup_test();
+}
+
+static void
+test_simpletimeout(void)
+{
+ struct timeval tv;
+ struct event ev;
+
+ setup_test("Simple timeout: ");
+
+ tv.tv_usec = 200*1000;
+ tv.tv_sec = 0;
+ evutil_timerclear(&tcalled);
+ evtimer_set(&ev, timeout_cb, NULL);
+ evtimer_add(&ev, &tv);
+
+ evutil_gettimeofday(&tset, NULL);
+ event_dispatch();
+ test_timeval_diff_eq(&tset, &tcalled, 200);
+
+ test_ok = 1;
+end:
+ cleanup_test();
+}
+
+static void
+periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ int *count = arg;
+
+ (*count)++;
+ if (*count == 6) {
+ /* call loopexit only once - on slow machines(?), it is
+ * apparently possible for this to get called twice. */
+ test_ok = 1;
+ event_base_loopexit(global_base, NULL);
+ }
+}
+
+static void
+test_persistent_timeout(void)
+{
+ struct timeval tv;
+ struct event ev;
+ int count = 0;
+
+ evutil_timerclear(&tv);
+ tv.tv_usec = 10000;
+
+ event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
+ periodic_timeout_cb, &count);
+ event_add(&ev, &tv);
+
+ event_dispatch();
+
+ event_del(&ev);
+}
+
+static void
+test_persistent_timeout_jump(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event ev;
+ int count = 0;
+ struct timeval msec100 = { 0, 100 * 1000 };
+ struct timeval msec50 = { 0, 50 * 1000 };
+ struct timeval msec300 = { 0, 300 * 1000 };
+
+ event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
+ event_add(&ev, &msec100);
+ /* Wait for a bit */
+ evutil_usleep_(&msec300);
+ event_base_loopexit(data->base, &msec50);
+ event_base_dispatch(data->base);
+ tt_int_op(count, ==, 1);
+
+end:
+ event_del(&ev);
+}
+
+struct persist_active_timeout_called {
+ int n;
+ short events[16];
+ struct timeval tvs[16];
+};
+
+static void
+activate_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ event_active(ev, EV_READ, 1);
+}
+
+static void
+persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct persist_active_timeout_called *c = arg;
+ if (c->n < 15) {
+ c->events[c->n] = event;
+ evutil_gettimeofday(&c->tvs[c->n], NULL);
+ ++c->n;
+ }
+}
+
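+/*
+ * Check that manually activating a persistent timeout event plays well with
+ * its schedule: the manual EV_READ activation at about 100 msec re-arms the
+ * 200-msec period, so the persistent timeouts are expected at roughly 300
+ * and 500 msec before the loop exits at 600 msec.
+ */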
+static void
+test_persistent_active_timeout(void *ptr)
+{
+ struct timeval tv, tv2, tv_exit, start;
+ struct event ev;
+ struct persist_active_timeout_called res;
+
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+
+ memset(&res, 0, sizeof(res));
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 200 * 1000;
+ event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
+ persist_active_timeout_cb, &res);
+ event_add(&ev, &tv);
+
+ tv2.tv_sec = 0;
+ tv2.tv_usec = 100 * 1000;
+ event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);
+
+ tv_exit.tv_sec = 0;
+ tv_exit.tv_usec = 600 * 1000;
+ event_base_loopexit(base, &tv_exit);
+
+ event_base_assert_ok_(base);
+ evutil_gettimeofday(&start, NULL);
+
+ event_base_dispatch(base);
+ event_base_assert_ok_(base);
+
+ tt_int_op(res.n, ==, 3);
+ tt_int_op(res.events[0], ==, EV_READ);
+ tt_int_op(res.events[1], ==, EV_TIMEOUT);
+ tt_int_op(res.events[2], ==, EV_TIMEOUT);
+ test_timeval_diff_eq(&start, &res.tvs[0], 100);
+ test_timeval_diff_eq(&start, &res.tvs[1], 300);
+ test_timeval_diff_eq(&start, &res.tvs[2], 500);
+end:
+ event_del(&ev);
+}
+
+struct common_timeout_info {
+ struct event ev;
+ struct timeval called_at;
+ int which;
+ int count;
+};
+
+static void
+common_timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct common_timeout_info *ti = arg;
+ ++ti->count;
+ evutil_gettimeofday(&ti->called_at, NULL);
+ if (ti->count >= 4)
+ event_del(&ti->ev);
+}
+
+static void
+test_common_timeout(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+
+ struct event_base *base = data->base;
+ int i;
+ struct common_timeout_info info[100];
+
+ struct timeval start;
+ struct timeval tmp_100_ms = { 0, 100*1000 };
+ struct timeval tmp_200_ms = { 0, 200*1000 };
+ struct timeval tmp_5_sec = { 5, 0 };
+ struct timeval tmp_5M_usec = { 0, 5*1000*1000 };
+
+ const struct timeval *ms_100, *ms_200, *sec_5;
+
+ ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
+ ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
+ sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
+ tt_assert(ms_100);
+ tt_assert(ms_200);
+ tt_assert(sec_5);
+ tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
+ ==, ms_200);
+ tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
+ tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
+ tt_int_op(ms_100->tv_sec, ==, 0);
+ tt_int_op(ms_200->tv_sec, ==, 0);
+ tt_int_op(sec_5->tv_sec, ==, 5);
+ tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
+ tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
+ tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);
+
+ memset(info, 0, sizeof(info));
+
+ for (i=0; i<100; ++i) {
+ info[i].which = i;
+ event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
+ common_timeout_cb, &info[i]);
+ if (i % 2) {
+ if ((i%20)==1) {
+ /* Glass-box test: Make sure we survive the
+ * transition to non-common timeouts. It's
+ * a little tricky. */
+ event_add(&info[i].ev, ms_200);
+ event_add(&info[i].ev, &tmp_100_ms);
+ } else if ((i%20)==3) {
+ /* Check heap-to-common too. */
+ event_add(&info[i].ev, &tmp_200_ms);
+ event_add(&info[i].ev, ms_100);
+ } else if ((i%20)==5) {
+ /* Also check common-to-common. */
+ event_add(&info[i].ev, ms_200);
+ event_add(&info[i].ev, ms_100);
+ } else {
+ event_add(&info[i].ev, ms_100);
+ }
+ } else {
+ event_add(&info[i].ev, ms_200);
+ }
+ }
+
+ event_base_assert_ok_(base);
+ evutil_gettimeofday(&start, NULL);
+ event_base_dispatch(base);
+
+ event_base_assert_ok_(base);
+
+ for (i=0; i<10; ++i) {
+ tt_int_op(info[i].count, ==, 4);
+ if (i % 2) {
+ test_timeval_diff_eq(&start, &info[i].called_at, 400);
+ } else {
+ test_timeval_diff_eq(&start, &info[i].called_at, 800);
+ }
+ }
+
+ /* Make sure we can free the base with some events in. */
+ for (i=0; i<100; ++i) {
+ if (i % 2) {
+ event_add(&info[i].ev, ms_100);
+ } else {
+ event_add(&info[i].ev, ms_200);
+ }
+ }
+
+end:
+ event_base_free(data->base); /* need to do this here before info is
+ * out-of-scope */
+ data->base = NULL;
+}
+
+#ifndef _WIN32
+
+#define current_base event_global_current_base_
+extern struct event_base *current_base;
+
+static void
+fork_signal_cb(evutil_socket_t fd, short events, void *arg)
+{
+ event_del(arg);
+}
+
+
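+/*
+ * Check that event_reinit() lets a forked child keep using the inherited
+ * event_base: the child reinitializes the base, registers its own SIGUSR1
+ * handler, and its loop must still deliver callbacks (checked via the
+ * child's exit status), while the parent's loop keeps working afterwards.
+ */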
+static void
+test_fork(void)
+{
+ int status;
+ struct event ev, sig_ev, usr_ev, existing_ev;
+ pid_t pid;
+
+ setup_test("After fork: ");
+
+ tt_assert(current_base);
+ evthread_make_base_notifiable(current_base);
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+
+ evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev);
+ evsignal_add(&sig_ev, NULL);
+
+ evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev);
+ evsignal_add(&existing_ev, NULL);
+
+ event_base_assert_ok_(current_base);
+ TT_BLATHER(("Before fork"));
+ if ((pid = regress_fork()) == 0) {
+ /* in the child */
+ TT_BLATHER(("In child, before reinit"));
+ event_base_assert_ok_(current_base);
+ if (event_reinit(current_base) == -1) {
+ fprintf(stdout, "FAILED (reinit)\n");
+ exit(1);
+ }
+ TT_BLATHER(("After reinit"));
+ event_base_assert_ok_(current_base);
+ TT_BLATHER(("After assert-ok"));
+
+ evsignal_del(&sig_ev);
+
+ evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
+ evsignal_add(&usr_ev, NULL);
+ raise(SIGUSR1);
+ raise(SIGUSR2);
+
+ called = 0;
+
+ event_dispatch();
+
+ event_base_free(current_base);
+
+ /* we do not send an EOF; simple_read_cb requires an EOF
+ * to set test_ok. we just verify that the callback was
+ * called. */
+ exit(test_ok != 0 || called != 2 ? -2 : 76);
+ }
+
+ /* wait for the child to read the data */
+ {
+ const struct timeval tv = { 0, 100000 };
+ evutil_usleep_(&tv);
+ }
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ TT_BLATHER(("Before waitpid"));
+ if (waitpid(pid, &status, 0) == -1) {
+ fprintf(stdout, "FAILED (fork)\n");
+ exit(1);
+ }
+ TT_BLATHER(("After waitpid"));
+
+ if (WEXITSTATUS(status) != 76) {
+ fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
+ exit(1);
+ }
+
+ /* test that the current event loop still works */
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ fprintf(stderr, "%s: write\n", __func__);
+ }
+
+ shutdown(pair[0], SHUT_WR);
+
+ evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
+ evsignal_add(&usr_ev, NULL);
+ raise(SIGUSR1);
+ raise(SIGUSR2);
+
+ event_dispatch();
+
+ evsignal_del(&sig_ev);
+
+ end:
+ cleanup_test();
+}
+
+#ifdef EVENT__HAVE_PTHREADS
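+/*
+ * Check that event_del() blocks while the event's callback is running in
+ * another thread: del_wait_cb sleeps for 300 msec inside the dispatching
+ * thread, so the main thread's event_del(), issued roughly 30 msec in,
+ * should not return for about another 270 msec.
+ */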
+static void* del_wait_thread(void *arg)
+{
+ struct timeval tv_start, tv_end;
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_dispatch();
+ evutil_gettimeofday(&tv_end, NULL);
+
+ test_timeval_diff_eq(&tv_start, &tv_end, 300);
+
+ end:
+ return NULL;
+}
+
+static void
+del_wait_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct timeval delay = { 0, 300*1000 };
+ TT_BLATHER(("Sleeping"));
+ evutil_usleep_(&delay);
+ test_ok = 1;
+}
+
+static void
+test_del_wait(void)
+{
+ struct event ev;
+ pthread_t thread;
+
+ setup_test("event_del will wait: ");
+
+ event_set(&ev, pair[1], EV_READ, del_wait_cb, &ev);
+ event_add(&ev, NULL);
+
+ pthread_create(&thread, NULL, del_wait_thread, NULL);
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ {
+ struct timeval delay = { 0, 30*1000 };
+ evutil_usleep_(&delay);
+ }
+
+ {
+ struct timeval tv_start, tv_end;
+ evutil_gettimeofday(&tv_start, NULL);
+ event_del(&ev);
+ evutil_gettimeofday(&tv_end, NULL);
+ test_timeval_diff_eq(&tv_start, &tv_end, 270);
+ }
+
+ pthread_join(thread, NULL);
+
+ end:
+ ;
+}
+#endif
+
+static void
+signal_cb_sa(int sig)
+{
+ test_ok = 2;
+}
+
+static void
+signal_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *ev = arg;
+
+ evsignal_del(ev);
+ test_ok = 1;
+}
+
+static void
+test_simplesignal_impl(int find_reorder)
+{
+ struct event ev;
+ struct itimerval itv;
+
+ evsignal_set(&ev, SIGALRM, signal_cb, &ev);
+ evsignal_add(&ev, NULL);
+ /* find bugs in which operations are re-ordered */
+ if (find_reorder) {
+ evsignal_del(&ev);
+ evsignal_add(&ev, NULL);
+ }
+
+ memset(&itv, 0, sizeof(itv));
+ itv.it_value.tv_sec = 0;
+ itv.it_value.tv_usec = 100000;
+ if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+ goto skip_simplesignal;
+
+ event_dispatch();
+ skip_simplesignal:
+ if (evsignal_del(&ev) == -1)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+static void
+test_simplestsignal(void)
+{
+ setup_test("Simplest one signal: ");
+ test_simplesignal_impl(0);
+}
+
+static void
+test_simplesignal(void)
+{
+ setup_test("Simple signal: ");
+ test_simplesignal_impl(1);
+}
+
+static void
+test_multiplesignal(void)
+{
+ struct event ev_one, ev_two;
+ struct itimerval itv;
+
+ setup_test("Multiple signal: ");
+
+ evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
+ evsignal_add(&ev_one, NULL);
+
+ evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
+ evsignal_add(&ev_two, NULL);
+
+ memset(&itv, 0, sizeof(itv));
+ itv.it_value.tv_sec = 0;
+ itv.it_value.tv_usec = 100000;
+ if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+ goto skip_simplesignal;
+
+ event_dispatch();
+
+ skip_simplesignal:
+ if (evsignal_del(&ev_one) == -1)
+ test_ok = 0;
+ if (evsignal_del(&ev_two) == -1)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+static void
+test_immediatesignal(void)
+{
+ struct event ev;
+
+ test_ok = 0;
+ evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
+ evsignal_add(&ev, NULL);
+ raise(SIGUSR1);
+ event_loop(EVLOOP_NONBLOCK);
+ evsignal_del(&ev);
+ cleanup_test();
+}
+
+static void
+test_signal_dealloc(void)
+{
+ /* make sure that evsignal_event is event_del'ed and pipe closed */
+ struct event ev;
+ struct event_base *base = event_init();
+ evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
+ evsignal_add(&ev, NULL);
+ evsignal_del(&ev);
+ event_base_free(base);
+ /* If we got here without asserting, we're fine. */
+ test_ok = 1;
+ cleanup_test();
+}
+
+static void
+test_signal_pipeloss(void)
+{
+ /* make sure that the base1 pipe is closed correctly. */
+ struct event_base *base1, *base2;
+ int pipe1;
+ test_ok = 0;
+ base1 = event_init();
+ pipe1 = base1->sig.ev_signal_pair[0];
+ base2 = event_init();
+ event_base_free(base2);
+ event_base_free(base1);
+ if (close(pipe1) != -1 || errno!=EBADF) {
+ /* fd must be closed, so second close gives -1, EBADF */
+ printf("signal pipe not closed. ");
+ test_ok = 0;
+ } else {
+ test_ok = 1;
+ }
+ cleanup_test();
+}
+
+/*
+ * make two bases to catch signals, use both of them. this only works
+ * for event mechanisms that use our signal pipe trick. kqueue handles
+ * signals internally, and all interested kqueues get all the signals.
+ */
+static void
+test_signal_switchbase(void)
+{
+ struct event ev1, ev2;
+ struct event_base *base1, *base2;
+ int is_kqueue;
+ test_ok = 0;
+ base1 = event_init();
+ base2 = event_init();
+ is_kqueue = !strcmp(event_get_method(),"kqueue");
+ evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
+ evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
+ if (event_base_set(base1, &ev1) ||
+ event_base_set(base2, &ev2) ||
+ event_add(&ev1, NULL) ||
+ event_add(&ev2, NULL)) {
+ fprintf(stderr, "%s: cannot set base, add\n", __func__);
+ exit(1);
+ }
+
+ tt_ptr_op(event_get_base(&ev1), ==, base1);
+ tt_ptr_op(event_get_base(&ev2), ==, base2);
+
+ test_ok = 0;
+ /* can handle signal before loop is called */
+ raise(SIGUSR1);
+ event_base_loop(base2, EVLOOP_NONBLOCK);
+ if (is_kqueue) {
+ if (!test_ok)
+ goto end;
+ test_ok = 0;
+ }
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ if (test_ok && !is_kqueue) {
+ test_ok = 0;
+
+ /* set base1 to handle signals */
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ raise(SIGUSR1);
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ event_base_loop(base2, EVLOOP_NONBLOCK);
+ }
+end:
+ event_base_free(base1);
+ event_base_free(base2);
+ cleanup_test();
+}
+
+/*
+ * assert that a signal event removed from the event queue really is
+ * removed - with no possibility of its parent handler being fired.
+ */
+static void
+test_signal_assert(void)
+{
+ struct event ev;
+ struct event_base *base = event_init();
+ test_ok = 0;
+ /* use SIGCONT so we don't kill ourselves when we signal to nowhere */
+ evsignal_set(&ev, SIGCONT, signal_cb, &ev);
+ evsignal_add(&ev, NULL);
+ /*
+ * if evsignal_del() fails to reset the handler, its current handler
+ * will still point to evsig_handler().
+ */
+ evsignal_del(&ev);
+
+ raise(SIGCONT);
+#if 0
+ /* only way to verify we were in evsig_handler() */
+ /* XXXX Now there's no longer a good way. */
+ if (base->sig.evsig_caught)
+ test_ok = 0;
+ else
+ test_ok = 1;
+#else
+ test_ok = 1;
+#endif
+
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+
+/*
+ * assert that we restore our previous signal handler properly.
+ */
+static void
+test_signal_restore(void)
+{
+ struct event ev;
+ struct event_base *base = event_init();
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction sa;
+#endif
+
+ test_ok = 0;
+#ifdef EVENT__HAVE_SIGACTION
+ sa.sa_handler = signal_cb_sa;
+ sa.sa_flags = 0x0;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(SIGUSR1, &sa, NULL) == -1)
+ goto out;
+#else
+ if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
+ goto out;
+#endif
+ evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
+ evsignal_add(&ev, NULL);
+ evsignal_del(&ev);
+
+ raise(SIGUSR1);
+ /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
+ if (test_ok != 2)
+ test_ok = 0;
+out:
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+
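+/*
+ * Helpers for the "signal while processing" test: signal_cb_swp re-raises
+ * SIGUSR1 from inside its own callback until it has run five times, and
+ * timeout_cb_swp arms a 5-second fallback timer that fails the test if the
+ * re-raised signals stop being delivered.
+ */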
+static void
+signal_cb_swp(int sig, short event, void *arg)
+{
+ called++;
+ if (called < 5)
+ raise(sig);
+ else
+ event_loopexit(NULL);
+}
+static void
+timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
+{
+ if (called == -1) {
+ struct timeval tv = {5, 0};
+
+ called = 0;
+ evtimer_add((struct event *)arg, &tv);
+ raise(SIGUSR1);
+ return;
+ }
+ test_ok = 0;
+ event_loopexit(NULL);
+}
+
+static void
+test_signal_while_processing(void)
+{
+ struct event_base *base = event_init();
+ struct event ev, ev_timer;
+ struct timeval tv = {0, 0};
+
+ setup_test("Receiving a signal while processing other signal: ");
+
+ called = -1;
+ test_ok = 1;
+ signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
+ signal_add(&ev, NULL);
+ evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
+ evtimer_add(&ev_timer, &tv);
+ event_dispatch();
+
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+#endif
+
+static void
+test_free_active_base(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base1;
+ struct event ev1;
+
+ base1 = event_init();
+ if (base1) {
+ event_assign(&ev1, base1, data->pair[1], EV_READ,
+ dummy_read_cb, NULL);
+ event_add(&ev1, NULL);
+ event_base_free(base1); /* should not crash */
+ } else {
+ tt_fail_msg("failed to create event_base for test");
+ }
+
+ base1 = event_init();
+ tt_assert(base1);
+ event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL);
+ event_active(&ev1, EV_READ, 1);
+ event_base_free(base1);
+end:
+ ;
+}
+
+static void
+test_manipulate_active_events(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event ev1;
+
+ event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);
+
+ /* Make sure an active event is pending. */
+ event_active(&ev1, EV_READ, 1);
+ tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
+ ==, EV_READ);
+
+ /* Make sure that activating an event twice works. */
+ event_active(&ev1, EV_WRITE, 1);
+ tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
+ ==, EV_READ|EV_WRITE);
+
+end:
+ event_del(&ev1);
+}
+
+static void
+event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ struct event_base *base = event_get_base(ev);
+ event_base_assert_ok_(base);
+ event_base_loopexit(base, NULL);
+ tt_want(ev == event_base_get_running_event(base));
+}
+
+static void
+test_event_new_selfarg(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
+ event_self_cbarg());
+
+ event_active(ev, EV_READ, 1);
+ event_base_dispatch(base);
+
+ event_free(ev);
+}
+
+static void
+test_event_assign_selfarg(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event ev;
+
+ event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
+ event_self_cbarg());
+ event_active(&ev, EV_READ, 1);
+ event_base_dispatch(base);
+}
+
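+/*
+ * event_base_get_num_events() reports how many events are currently
+ * active, added, and/or "virtual", depending on which EVENT_BASE_COUNT_*
+ * flags are OR'd into the mask; the expected totals below include the
+ * internal timeout event that the base registers for itself.
+ */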
+static void
+test_event_base_get_num_events(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event ev;
+ int event_count_active;
+ int event_count_virtual;
+ int event_count_added;
+ int event_count_active_virtual;
+ int event_count_active_added;
+ int event_count_virtual_added;
+ int event_count_active_added_virtual;
+
+ struct timeval qsec = {0, 100000};
+
+ event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
+ event_self_cbarg());
+
+ event_add(&ev, &qsec);
+ event_count_active = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE);
+ event_count_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL);
+ event_count_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ADDED);
+ event_count_active_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
+ event_count_active_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
+ event_count_virtual_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
+ event_count_active_added_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|
+ EVENT_BASE_COUNT_ADDED|
+ EVENT_BASE_COUNT_VIRTUAL);
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 0);
+ /* libevent itself adds a timeout event, so the event_count is 2 here */
+ tt_int_op(event_count_added, ==, 2);
+ tt_int_op(event_count_active_virtual, ==, 0);
+ tt_int_op(event_count_active_added, ==, 2);
+ tt_int_op(event_count_virtual_added, ==, 2);
+ tt_int_op(event_count_active_added_virtual, ==, 2);
+
+ event_active(&ev, EV_READ, 1);
+ event_count_active = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE);
+ event_count_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL);
+ event_count_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ADDED);
+ event_count_active_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
+ event_count_active_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
+ event_count_virtual_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
+ event_count_active_added_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|
+ EVENT_BASE_COUNT_ADDED|
+ EVENT_BASE_COUNT_VIRTUAL);
+ tt_int_op(event_count_active, ==, 1);
+ tt_int_op(event_count_virtual, ==, 0);
+ tt_int_op(event_count_added, ==, 3);
+ tt_int_op(event_count_active_virtual, ==, 1);
+ tt_int_op(event_count_active_added, ==, 4);
+ tt_int_op(event_count_virtual_added, ==, 3);
+ tt_int_op(event_count_active_added_virtual, ==, 4);
+
+ event_base_loop(base, 0);
+ event_count_active = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE);
+ event_count_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL);
+ event_count_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ADDED);
+ event_count_active_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
+ event_count_active_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
+ event_count_virtual_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
+ event_count_active_added_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|
+ EVENT_BASE_COUNT_ADDED|
+ EVENT_BASE_COUNT_VIRTUAL);
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 0);
+ tt_int_op(event_count_added, ==, 0);
+ tt_int_op(event_count_active_virtual, ==, 0);
+ tt_int_op(event_count_active_added, ==, 0);
+ tt_int_op(event_count_virtual_added, ==, 0);
+ tt_int_op(event_count_active_added_virtual, ==, 0);
+
+ event_base_add_virtual_(base);
+ event_count_active = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE);
+ event_count_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL);
+ event_count_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ADDED);
+ event_count_active_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
+ event_count_active_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
+ event_count_virtual_added = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
+ event_count_active_added_virtual = event_base_get_num_events(base,
+ EVENT_BASE_COUNT_ACTIVE|
+ EVENT_BASE_COUNT_ADDED|
+ EVENT_BASE_COUNT_VIRTUAL);
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 1);
+ tt_int_op(event_count_added, ==, 0);
+ tt_int_op(event_count_active_virtual, ==, 1);
+ tt_int_op(event_count_active_added, ==, 0);
+ tt_int_op(event_count_virtual_added, ==, 1);
+ tt_int_op(event_count_active_added_virtual, ==, 1);
+
+end:
+ ;
+}
+
+static void
+test_event_base_get_max_events(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event ev;
+ struct event ev2;
+ int event_count_active;
+ int event_count_virtual;
+ int event_count_added;
+ int event_count_active_virtual;
+ int event_count_active_added;
+ int event_count_virtual_added;
+ int event_count_active_added_virtual;
+
+ struct timeval qsec = {0, 100000};
+
+ event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
+ event_self_cbarg());
+ event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
+ event_self_cbarg());
+
+ event_add(&ev, &qsec);
+ event_add(&ev2, &qsec);
+ event_del(&ev2);
+
+ event_count_active = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE, 0);
+ event_count_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_active_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_virtual_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_added_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE |
+ EVENT_BASE_COUNT_ADDED |
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 0);
+ /* libevent itself adds a timeout event, so the event_count is 4 here */
+ tt_int_op(event_count_added, ==, 4);
+ tt_int_op(event_count_active_virtual, ==, 0);
+ tt_int_op(event_count_active_added, ==, 4);
+ tt_int_op(event_count_virtual_added, ==, 4);
+ tt_int_op(event_count_active_added_virtual, ==, 4);
+
+ event_active(&ev, EV_READ, 1);
+ event_count_active = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE, 0);
+ event_count_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_active_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_virtual_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_added_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE |
+ EVENT_BASE_COUNT_ADDED |
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+
+ tt_int_op(event_count_active, ==, 1);
+ tt_int_op(event_count_virtual, ==, 0);
+ tt_int_op(event_count_added, ==, 4);
+ tt_int_op(event_count_active_virtual, ==, 1);
+ tt_int_op(event_count_active_added, ==, 5);
+ tt_int_op(event_count_virtual_added, ==, 4);
+ tt_int_op(event_count_active_added_virtual, ==, 5);
+
+ event_base_loop(base, 0);
+ event_count_active = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE, 1);
+ event_count_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL, 1);
+ event_count_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ADDED, 1);
+ event_count_active_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_active_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_virtual_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_added_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE |
+ EVENT_BASE_COUNT_ADDED |
+ EVENT_BASE_COUNT_VIRTUAL, 1);
+
+ tt_int_op(event_count_active, ==, 1);
+ tt_int_op(event_count_virtual, ==, 0);
+ tt_int_op(event_count_added, ==, 4);
+ tt_int_op(event_count_active_virtual, ==, 0);
+ tt_int_op(event_count_active_added, ==, 0);
+ tt_int_op(event_count_virtual_added, ==, 0);
+ tt_int_op(event_count_active_added_virtual, ==, 0);
+
+ event_count_active = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE, 0);
+ event_count_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ADDED, 0);
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 0);
+ tt_int_op(event_count_added, ==, 0);
+
+ event_base_add_virtual_(base);
+ event_count_active = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE, 0);
+ event_count_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
+ event_count_active_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_virtual_added = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
+ event_count_active_added_virtual = event_base_get_max_events(base,
+ EVENT_BASE_COUNT_ACTIVE |
+ EVENT_BASE_COUNT_ADDED |
+ EVENT_BASE_COUNT_VIRTUAL, 0);
+
+ tt_int_op(event_count_active, ==, 0);
+ tt_int_op(event_count_virtual, ==, 1);
+ tt_int_op(event_count_added, ==, 0);
+ tt_int_op(event_count_active_virtual, ==, 1);
+ tt_int_op(event_count_active_added, ==, 0);
+ tt_int_op(event_count_virtual_added, ==, 1);
+ tt_int_op(event_count_active_added_virtual, ==, 1);
+
+end:
+ ;
+}
+
+static void
+test_bad_assign(void *ptr)
+{
+ struct event ev;
+ int r;
+ /* READ|SIGNAL is not allowed */
+ r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
+ tt_int_op(r,==,-1);
+
+end:
+ ;
+}
+
+static int reentrant_cb_run = 0;
+
+static void
+bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct event_base *base = ptr;
+ int r;
+ reentrant_cb_run = 1;
+ /* This reentrant call to event_base_loop should be detected and
+ * should fail */
+ r = event_base_loop(base, 0);
+ tt_int_op(r, ==, -1);
+end:
+ ;
+}
+
+static void
+test_bad_reentrant(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event ev;
+ int r;
+ event_assign(&ev, base, -1,
+ 0, bad_reentrant_run_loop_cb, base);
+
+ event_active(&ev, EV_WRITE, 1);
+ r = event_base_loop(base, 0);
+ tt_int_op(r, ==, 1);
+ tt_int_op(reentrant_cb_run, ==, 1);
+end:
+ ;
+}
+
+static int n_write_a_byte_cb=0;
+static int n_read_and_drain_cb=0;
+static int n_activate_other_event_cb=0;
+static void
+write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
+{
+ char buf[] = "x";
+ if (write(fd, buf, 1) == 1)
+ ++n_write_a_byte_cb;
+}
+static void
+read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
+{
+ char buf[128];
+ int n;
+ ++n_read_and_drain_cb;
+ while ((n = read(fd, buf, sizeof(buf))) > 0)
+ ;
+}
+
+static void
+activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
+{
+ struct event *ev_activate = other_;
+ ++n_activate_other_event_cb;
+ event_active_later_(ev_activate, EV_READ);
+}
+
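+/*
+ * Exercise event_active_later_(): ev3 and ev4 schedule each other for
+ * activation on a later pass through the loop while ev1/ev2 shuttle single
+ * bytes across the socketpair, so by the time the 100-msec loopexit fires
+ * the write and activate counters should match and all three counters
+ * should exceed 100.
+ */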
+static void
+test_active_later(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event *ev1 = NULL, *ev2 = NULL;
+ struct event ev3, ev4;
+ struct timeval qsec = {0, 100000};
+ ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
+ ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
+ event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
+ event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
+ event_add(ev1, NULL);
+ event_add(ev2, NULL);
+ event_active_later_(&ev3, EV_READ);
+
+ event_base_loopexit(data->base, &qsec);
+
+ event_base_loop(data->base, 0);
+
+ TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
+ n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
+ event_del(&ev3);
+ event_del(&ev4);
+
+ tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
+ tt_int_op(n_write_a_byte_cb, >, 100);
+ tt_int_op(n_read_and_drain_cb, >, 100);
+ tt_int_op(n_activate_other_event_cb, >, 100);
+
+ event_active_later_(&ev4, EV_READ);
+ event_active(&ev4, EV_READ, 1); /* This should make the event
+ active immediately. */
+ tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
+ tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);
+
+ /* Now leave this one around, so that event_free sees it and removes
+ * it. */
+ event_active_later_(&ev3, EV_READ);
+ event_base_assert_ok_(data->base);
+
+end:
+ if (ev1)
+ event_free(ev1);
+ if (ev2)
+ event_free(ev2);
+
+ event_base_free(data->base);
+ data->base = NULL;
+}
+
+
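+/*
+ * Helpers for test_event_remove_timeout below: event_remove_timer() strips
+ * only the pending timeout from an event, so ev[0] should still fire for
+ * EV_READ when a byte arrives, while ev[1], a pure timer, should never run
+ * once its timeout has been removed.
+ */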
+static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
+{
+ int *intptr = arg;
+ (void) fd; (void) what;
+ ++*intptr;
+}
+static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct event **ep = arg;
+ (void) fd; (void) what;
+ event_remove_timer(ep[0]);
+ event_remove_timer(ep[1]);
+}
+static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
+{
+ evutil_socket_t *sockp = arg;
+ (void) fd; (void) what;
+ (void) write(*sockp, "A", 1);
+}
+struct read_not_timeout_param
+{
+ struct event **ev;
+ int events;
+ int count;
+};
+static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct read_not_timeout_param *rntp = arg;
+ char c;
+ ev_ssize_t n;
+ (void) fd; (void) what;
+ n = read(fd, &c, 1);
+ tt_int_op(n, ==, 1);
+ rntp->events |= what;
+ ++rntp->count;
+ if(2 == rntp->count) event_del(rntp->ev[0]);
+end:
+ ;
+}
+
+static void
+test_event_remove_timeout(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = data->base;
+ struct event *ev[5];
+ int ev1_fired=0;
+ struct timeval ms25 = { 0, 25*1000 },
+ ms40 = { 0, 40*1000 },
+ ms75 = { 0, 75*1000 },
+ ms125 = { 0, 125*1000 };
+ struct read_not_timeout_param rntp = { ev, 0, 0 };
+
+ event_base_assert_ok_(base);
+
+ ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
+ read_not_timeout_cb, &rntp);
+ ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
+ ev[2] = evtimer_new(base, remove_timers_cb, ev);
+ ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
+ ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
+ tt_assert(base);
+ event_add(ev[2], &ms25); /* remove timers */
+ event_add(ev[4], &ms40); /* write to test if timer re-activates */
+ event_add(ev[0], &ms75); /* read */
+ event_add(ev[1], &ms75); /* timer */
+ event_add(ev[3], &ms125); /* timeout. */
+ event_base_assert_ok_(base);
+
+ event_base_dispatch(base);
+
+ tt_int_op(ev1_fired, ==, 0);
+ tt_int_op(rntp.events, ==, EV_READ);
+
+ event_base_assert_ok_(base);
+end:
+ event_free(ev[0]);
+ event_free(ev[1]);
+ event_free(ev[2]);
+ event_free(ev[3]);
+ event_free(ev[4]);
+}
+
+static void
+test_event_base_new(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_base *base = 0;
+ struct event ev1;
+ struct basic_cb_args args;
+
+ int towrite = (int)strlen(TEST1)+1;
+ int len = write(data->pair[0], TEST1, towrite);
+
+ if (len < 0)
+ tt_abort_perror("initial write");
+ else if (len != towrite)
+ tt_abort_printf(("initial write fell short (%d of %d bytes)",
+ len, towrite));
+
+ if (shutdown(data->pair[0], SHUT_WR))
+ tt_abort_perror("initial write shutdown");
+
+ base = event_base_new();
+ if (!base)
+ tt_abort_msg("failed to create event base");
+
+ args.eb = base;
+ args.ev = &ev1;
+ args.callcount = 0;
+ event_assign(&ev1, base, data->pair[1],
+ EV_READ|EV_PERSIST, basic_read_cb, &args);
+
+ if (event_add(&ev1, NULL))
+ tt_abort_perror("initial event_add");
+
+ if (event_base_loop(base, 0))
+ tt_abort_msg("unsuccessful exit from event loop");
+
+end:
+ if (base)
+ event_base_free(base);
+}
+
+static void
+test_loopexit(void)
+{
+ struct timeval tv, tv_start, tv_end;
+ struct event ev;
+
+ setup_test("Loop exit: ");
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 60*60*24;
+ evtimer_set(&ev, timeout_cb, NULL);
+ evtimer_add(&ev, &tv);
+
+ tv.tv_usec = 300*1000;
+ tv.tv_sec = 0;
+ event_loopexit(&tv);
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_dispatch();
+ evutil_gettimeofday(&tv_end, NULL);
+
+ evtimer_del(&ev);
+
+ tt_assert(event_base_got_exit(global_base));
+ tt_assert(!event_base_got_break(global_base));
+
+ test_timeval_diff_eq(&tv_start, &tv_end, 300);
+
+ test_ok = 1;
+end:
+ cleanup_test();
+}
+
+static void
+test_loopexit_multiple(void)
+{
+ struct timeval tv, tv_start, tv_end;
+ struct event_base *base;
+
+ setup_test("Loop Multiple exit: ");
+
+ base = event_base_new();
+
+ tv.tv_usec = 200*1000;
+ tv.tv_sec = 0;
+ event_base_loopexit(base, &tv);
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 3;
+ event_base_loopexit(base, &tv);
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_base_dispatch(base);
+ evutil_gettimeofday(&tv_end, NULL);
+
+ tt_assert(event_base_got_exit(base));
+ tt_assert(!event_base_got_break(base));
+
+ event_base_free(base);
+
+ test_timeval_diff_eq(&tv_start, &tv_end, 200);
+
+ test_ok = 1;
+
+end:
+ cleanup_test();
+}
+
+static void
+break_cb(evutil_socket_t fd, short events, void *arg)
+{
+ test_ok = 1;
+ event_loopbreak();
+}
+
+static void
+fail_cb(evutil_socket_t fd, short events, void *arg)
+{
+ test_ok = 0;
+}
+
+static void
+test_loopbreak(void)
+{
+ struct event ev1, ev2;
+ struct timeval tv;
+
+ setup_test("Loop break: ");
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ evtimer_set(&ev1, break_cb, NULL);
+ evtimer_add(&ev1, &tv);
+ evtimer_set(&ev2, fail_cb, NULL);
+ evtimer_add(&ev2, &tv);
+
+ event_dispatch();
+
+ tt_assert(!event_base_got_exit(global_base));
+ tt_assert(event_base_got_break(global_base));
+
+ evtimer_del(&ev1);
+ evtimer_del(&ev2);
+
+end:
+ cleanup_test();
+}
+
+static struct event *readd_test_event_last_added = NULL;
+static void
+re_add_read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ char buf[256];
+ struct event *ev_other = arg;
+ ev_ssize_t n_read;
+
+ readd_test_event_last_added = ev_other;
+
+ n_read = read(fd, buf, sizeof(buf));
+
+ if (n_read < 0) {
+ tt_fail_perror("read");
+ event_base_loopbreak(event_get_base(ev_other));
+ return;
+ } else {
+ event_add(ev_other, NULL);
+ ++test_ok;
+ }
+}
+
+static void
+test_nonpersist_readd(void)
+{
+ struct event ev1, ev2;
+
+ setup_test("Re-add nonpersistent events: ");
+ event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2);
+ event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1);
+
+ if (write(pair[0], "Hello", 5) < 0) {
+ tt_fail_perror("write(pair[0])");
+ }
+
+ if (write(pair[1], "Hello", 5) < 0) {
+ tt_fail_perror("write(pair[1])\n");
+ }
+
+ if (event_add(&ev1, NULL) == -1 ||
+ event_add(&ev2, NULL) == -1) {
+ test_ok = 0;
+ }
+ if (test_ok != 0)
+ exit(1);
+ event_loop(EVLOOP_ONCE);
+ if (test_ok != 2)
+ exit(1);
+ /* At this point, we executed both callbacks. Whichever one got
+ * called first added the second, but the second then immediately got
+ * deleted before its callback was called. At this point, though, it
+ * re-added the first.
+ */
+ if (!readd_test_event_last_added) {
+ test_ok = 0;
+ } else if (readd_test_event_last_added == &ev1) {
+ if (!event_pending(&ev1, EV_READ, NULL) ||
+ event_pending(&ev2, EV_READ, NULL))
+ test_ok = 0;
+ } else {
+ if (event_pending(&ev1, EV_READ, NULL) ||
+ !event_pending(&ev2, EV_READ, NULL))
+ test_ok = 0;
+ }
+
+ event_del(&ev1);
+ event_del(&ev2);
+
+ cleanup_test();
+}
+
+struct test_pri_event {
+ struct event ev;
+ int count;
+};
+
+static void
+test_priorities_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct test_pri_event *pri = arg;
+ struct timeval tv;
+
+ if (pri->count == 3) {
+ event_loopexit(NULL);
+ return;
+ }
+
+ pri->count++;
+
+ evutil_timerclear(&tv);
+ event_add(&pri->ev, &tv);
+}
+
+static void
+test_priorities_impl(int npriorities)
+{
+ struct test_pri_event one, two;
+ struct timeval tv;
+
+ TT_BLATHER(("Testing Priorities %d: ", npriorities));
+
+ event_base_priority_init(global_base, npriorities);
+
+ memset(&one, 0, sizeof(one));
+ memset(&two, 0, sizeof(two));
+
+ timeout_set(&one.ev, test_priorities_cb, &one);
+ if (event_priority_set(&one.ev, 0) == -1) {
+ fprintf(stderr, "%s: failed to set priority", __func__);
+ exit(1);
+ }
+
+ timeout_set(&two.ev, test_priorities_cb, &two);
+ if (event_priority_set(&two.ev, npriorities - 1) == -1) {
+ fprintf(stderr, "%s: failed to set priority", __func__);
+ exit(1);
+ }
+
+ evutil_timerclear(&tv);
+
+ if (event_add(&one.ev, &tv) == -1)
+ exit(1);
+ if (event_add(&two.ev, &tv) == -1)
+ exit(1);
+
+ event_dispatch();
+
+ event_del(&one.ev);
+ event_del(&two.ev);
+
+ if (npriorities == 1) {
+ if (one.count == 3 && two.count == 3)
+ test_ok = 1;
+ } else if (npriorities == 2) {
+ /* Two is called once because event_loopexit is priority 1 */
+ if (one.count == 3 && two.count == 1)
+ test_ok = 1;
+ } else {
+ if (one.count == 3 && two.count == 0)
+ test_ok = 1;
+ }
+}
+
+static void
+test_priorities(void)
+{
+ test_priorities_impl(1);
+ if (test_ok)
+ test_priorities_impl(2);
+ if (test_ok)
+ test_priorities_impl(3);
+}
+
+/* priority-active-inversion: activate a higher-priority event, and make sure
+ * it keeps us from running a lower-priority event first. */
+static int n_pai_calls = 0;
+static struct event pai_events[3];
+
+static void
+prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
+{
+ int *call_order = arg;
+ *call_order = n_pai_calls++;
+ if (n_pai_calls == 1) {
+ /* This should activate later, even though it shares a
+ priority with us. */
+ event_active(&pai_events[1], EV_READ, 1);
+ /* This should activate next, since its priority is higher,
+ even though we activated it second. */
+ event_active(&pai_events[2], EV_TIMEOUT, 1);
+ }
+}
+
+static void
+test_priority_active_inversion(void *data_)
+{
+ struct basic_test_data *data = data_;
+ struct event_base *base = data->base;
+ int call_order[3];
+ int i;
+ tt_int_op(event_base_priority_init(base, 8), ==, 0);
+
+ n_pai_calls = 0;
+ memset(call_order, 0, sizeof(call_order));
+
+ for (i=0;i<3;++i) {
+ event_assign(&pai_events[i], data->base, -1, 0,
+ prio_active_inversion_cb, &call_order[i]);
+ }
+
+ event_priority_set(&pai_events[0], 4);
+ event_priority_set(&pai_events[1], 4);
+ event_priority_set(&pai_events[2], 0);
+
+ event_active(&pai_events[0], EV_WRITE, 1);
+
+ event_base_dispatch(base);
+ tt_int_op(n_pai_calls, ==, 3);
+ tt_int_op(call_order[0], ==, 0);
+ tt_int_op(call_order[1], ==, 2);
+ tt_int_op(call_order[2], ==, 1);
+end:
+ ;
+}
+
+
+static void
+test_multiple_cb(evutil_socket_t fd, short event, void *arg)
+{
+ if (event & EV_READ)
+ test_ok |= 1;
+ else if (event & EV_WRITE)
+ test_ok |= 2;
+}
+
+static void
+test_multiple_events_for_same_fd(void)
+{
+ struct event e1, e2;
+
+ setup_test("Multiple events for same fd: ");
+
+ event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
+ event_add(&e1, NULL);
+ event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
+ event_add(&e2, NULL);
+ event_loop(EVLOOP_ONCE);
+ event_del(&e2);
+
+ if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ event_loop(EVLOOP_ONCE);
+ event_del(&e1);
+
+ if (test_ok != 3)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
+int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+
+static void
+read_once_cb(evutil_socket_t fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+
+ if (called) {
+ test_ok = 0;
+ } else if (len) {
+ /* Assumes global pair[0] can be used for writing */
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ test_ok = 0;
+ } else {
+ test_ok = 1;
+ }
+ }
+
+ called++;
+}
+
+static void
+test_want_only_once(void)
+{
+ struct event ev;
+ struct timeval tv;
+
+ /* Check that a non-persistent read event fires only once */
+ setup_test("Want read only once: ");
+
+ if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ /* Setup the loop termination */
+ evutil_timerclear(&tv);
+ tv.tv_usec = 300*1000;
+ event_loopexit(&tv);
+
+ event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+#define TEST_MAX_INT 6
+
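+/*
+ * Round-trip tests for the evtag integer encoders: each 32-bit value (and
+ * a 64-bit companion a billion times larger) is encoded into an evbuffer
+ * and decoded back, and the buffer must be completely drained afterwards.
+ */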
+static void
+evtag_int_test(void *ptr)
+{
+ struct evbuffer *tmp = evbuffer_new();
+ ev_uint32_t integers[TEST_MAX_INT] = {
+ 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+ };
+ ev_uint32_t integer;
+ ev_uint64_t big_int;
+ int i;
+
+ evtag_init();
+
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ int oldlen, newlen;
+ oldlen = (int)EVBUFFER_LENGTH(tmp);
+ evtag_encode_int(tmp, integers[i]);
+ newlen = (int)EVBUFFER_LENGTH(tmp);
+ TT_BLATHER(("encoded 0x%08x with %d bytes",
+ (unsigned)integers[i], newlen - oldlen));
+ big_int = integers[i];
+ big_int *= 1000000000; /* 1 billion */
+ evtag_encode_int64(tmp, big_int);
+ }
+
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
+ tt_uint_op(integer, ==, integers[i]);
+ tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
+ tt_assert((big_int / 1000000000) == integers[i]);
+ }
+
+ tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
+end:
+ evbuffer_free(tmp);
+}
+
+static void
+evtag_fuzz(void *ptr)
+{
+ unsigned char buffer[4096];
+ struct evbuffer *tmp = evbuffer_new();
+ struct timeval tv;
+ int i, j;
+
+ int not_failed = 0;
+
+ evtag_init();
+
+ for (j = 0; j < 100; j++) {
+ for (i = 0; i < (int)sizeof(buffer); i++)
+ buffer[i] = test_weakrand();
+ evbuffer_drain(tmp, -1);
+ evbuffer_add(tmp, buffer, sizeof(buffer));
+
+ if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
+ not_failed++;
+ }
+
+ /* The majority of decodes should fail */
+ tt_int_op(not_failed, <, 10);
+
+ /* Now insert some corruption into the tag length field */
+ evbuffer_drain(tmp, -1);
+ evutil_timerclear(&tv);
+ tv.tv_sec = 1;
+ evtag_marshal_timeval(tmp, 0, &tv);
+ evbuffer_add(tmp, buffer, sizeof(buffer));
+
+ ((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
+ if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
+ tt_abort_msg("evtag_unmarshal_timeval should have failed");
+ }
+
+end:
+ evbuffer_free(tmp);
+}
+
+static void
+evtag_tag_encoding(void *ptr)
+{
+ struct evbuffer *tmp = evbuffer_new();
+ ev_uint32_t integers[TEST_MAX_INT] = {
+ 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+ };
+ ev_uint32_t integer;
+ int i;
+
+ evtag_init();
+
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ int oldlen, newlen;
+ oldlen = (int)EVBUFFER_LENGTH(tmp);
+ evtag_encode_tag(tmp, integers[i]);
+ newlen = (int)EVBUFFER_LENGTH(tmp);
+ TT_BLATHER(("encoded 0x%08x with %d bytes",
+ (unsigned)integers[i], newlen - oldlen));
+ }
+
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
+ tt_uint_op(integer, ==, integers[i]);
+ }
+
+ tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
+
+end:
+ evbuffer_free(tmp);
+}
+
+static void
+evtag_test_peek(void *ptr)
+{
+ struct evbuffer *tmp = evbuffer_new();
+ ev_uint32_t u32;
+
+ evtag_marshal_int(tmp, 30, 0);
+ evtag_marshal_string(tmp, 40, "Hello world");
+
+ tt_int_op(evtag_peek(tmp, &u32), ==, 1);
+ tt_int_op(u32, ==, 30);
+ tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
+ tt_int_op(u32, ==, 1+1+1);
+ tt_int_op(evtag_consume(tmp), ==, 0);
+
+ tt_int_op(evtag_peek(tmp, &u32), ==, 1);
+ tt_int_op(u32, ==, 40);
+ tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
+ tt_int_op(u32, ==, 1+1+11);
+ tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
+ tt_int_op(u32, ==, 11);
+
+end:
+ evbuffer_free(tmp);
+}
+
+
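+/*
+ * event_get_supported_methods() lists the compiled-in backends; avoiding
+ * the first one with event_config_avoid_method() should produce a base
+ * that uses a different backend, or no base at all when only a single
+ * method is available.
+ */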
+static void
+test_methods(void *ptr)
+{
+ const char **methods = event_get_supported_methods();
+ struct event_config *cfg = NULL;
+ struct event_base *base = NULL;
+ const char *backend;
+ int n_methods = 0;
+
+ tt_assert(methods);
+
+ backend = methods[0];
+ while (*methods != NULL) {
+ TT_BLATHER(("Support method: %s", *methods));
+ ++methods;
+ ++n_methods;
+ }
+
+ cfg = event_config_new();
+ assert(cfg != NULL);
+
+ tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
+ event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
+
+ base = event_base_new_with_config(cfg);
+ if (n_methods > 1) {
+ tt_assert(base);
+ tt_str_op(backend, !=, event_base_get_method(base));
+ } else {
+ tt_assert(base == NULL);
+ }
+
+end:
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
+}
+
+static void
+test_version(void *arg)
+{
+ const char *vstr;
+ ev_uint32_t vint;
+ int major, minor, patch, n;
+
+ vstr = event_get_version();
+ vint = event_get_version_number();
+
+ tt_assert(vstr);
+ tt_assert(vint);
+
+ tt_str_op(vstr, ==, LIBEVENT_VERSION);
+ tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);
+
+ n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
+ tt_assert(3 == n);
+ tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
+end:
+ ;
+}
+
+static void
+test_base_features(void *arg)
+{
+ struct event_base *base = NULL;
+ struct event_config *cfg = NULL;
+
+ cfg = event_config_new();
+
+ tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));
+
+ base = event_base_new_with_config(cfg);
+ if (base) {
+ tt_int_op(EV_FEATURE_ET, ==,
+ event_base_get_features(base) & EV_FEATURE_ET);
+ } else {
+ base = event_base_new();
+ tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
+ }
+
+end:
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
+}
+
+#ifdef EVENT__HAVE_SETENV
+#define SETENV_OK
+#elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
+static void setenv(const char *k, const char *v, int o_)
+{
+ char b[256];
+ evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
+ putenv(b);
+}
+#define SETENV_OK
+#endif
+
+#ifdef EVENT__HAVE_UNSETENV
+#define UNSETENV_OK
+#elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
+static void unsetenv(const char *k)
+{
+ char b[256];
+ evutil_snprintf(b, sizeof(b), "%s=",k);
+ putenv(b);
+}
+#define UNSETENV_OK
+#endif
+
+#if defined(SETENV_OK) && defined(UNSETENV_OK)
+static void
+methodname_to_envvar(const char *mname, char *buf, size_t buflen)
+{
+ char *cp;
+ evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
+ for (cp = buf; *cp; ++cp) {
+ *cp = EVUTIL_TOUPPER_(*cp);
+ }
+}
+#endif
+
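+/*
+ * Check the EVENT_NO<METHOD> environment variables: setting one should
+ * keep event_base_new() from picking that backend, and the
+ * EVENT_BASE_FLAG_IGNORE_ENV config flag should make the base ignore the
+ * variables again.  Skipped when setenv()/unsetenv() cannot be emulated.
+ */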
+static void
+test_base_environ(void *arg)
+{
+ struct event_base *base = NULL;
+ struct event_config *cfg = NULL;
+
+#if defined(SETENV_OK) && defined(UNSETENV_OK)
+ const char **basenames;
+ int i, n_methods=0;
+ char varbuf[128];
+ const char *defaultname, *ignoreenvname;
+
+ /* See if unsetenv works before we rely on it. */
+ setenv("EVENT_NOWAFFLES", "1", 1);
+ unsetenv("EVENT_NOWAFFLES");
+ if (getenv("EVENT_NOWAFFLES") != NULL) {
+#ifndef EVENT__HAVE_UNSETENV
+ TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
+#else
+ TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
+#endif
+ tt_skip();
+ }
+
+ basenames = event_get_supported_methods();
+ for (i = 0; basenames[i]; ++i) {
+ methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
+ unsetenv(varbuf);
+ ++n_methods;
+ }
+
+ base = event_base_new();
+ tt_assert(base);
+
+ defaultname = event_base_get_method(base);
+ TT_BLATHER(("default is <%s>", defaultname));
+ event_base_free(base);
+ base = NULL;
+
+ /* Can we disable the method with EVENT_NOfoo ? */
+ if (!strcmp(defaultname, "epoll (with changelist)")) {
+ setenv("EVENT_NOEPOLL", "1", 1);
+ ignoreenvname = "epoll";
+ } else {
+ methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
+ setenv(varbuf, "1", 1);
+ ignoreenvname = defaultname;
+ }
+
+ /* Use an empty cfg rather than NULL so a failure doesn't exit() */
+ cfg = event_config_new();
+ base = event_base_new_with_config(cfg);
+ event_config_free(cfg);
+ cfg = NULL;
+ if (n_methods == 1) {
+ tt_assert(!base);
+ } else {
+ tt_assert(base);
+ tt_str_op(defaultname, !=, event_base_get_method(base));
+ event_base_free(base);
+ base = NULL;
+ }
+
+ /* Can we disable looking at the environment with IGNORE_ENV ? */
+ cfg = event_config_new();
+ event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
+ base = event_base_new_with_config(cfg);
+ tt_assert(base);
+ tt_str_op(ignoreenvname, ==, event_base_get_method(base));
+#else
+ tt_skip();
+#endif
+
+end:
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
+}
+
+static void
+read_called_once_cb(evutil_socket_t fd, short event, void *arg)
+{
+ tt_int_op(event, ==, EV_READ);
+ called += 1;
+end:
+ ;
+}
+
+static void
+timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
+{
+ tt_int_op(event, ==, EV_TIMEOUT);
+ called += 100;
+end:
+ ;
+}
+
+static void
+immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
+{
+ tt_int_op(event, ==, EV_TIMEOUT);
+ called += 1000;
+end:
+ ;
+}
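+
+/* The three callbacks above bump "called" by distinct amounts (1, 100 and
+ * 1000) so that the final total identifies exactly which ones ran: one read
+ * (1) + one timeout (100) + the immediate callback registered twice (2000)
+ * add up to the 2101 that the test below checks for. */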
+
+static void
+test_event_once(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct timeval tv;
+ int r;
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 50*1000;
+ called = 0;
+ r = event_base_once(data->base, data->pair[0], EV_READ,
+ read_called_once_cb, NULL, NULL);
+ tt_int_op(r, ==, 0);
+ r = event_base_once(data->base, -1, EV_TIMEOUT,
+ timeout_called_once_cb, NULL, &tv);
+ tt_int_op(r, ==, 0);
+ r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
+ tt_int_op(r, <, 0);
+ r = event_base_once(data->base, -1, EV_TIMEOUT,
+ immediate_called_twice_cb, NULL, NULL);
+ tt_int_op(r, ==, 0);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ r = event_base_once(data->base, -1, EV_TIMEOUT,
+ immediate_called_twice_cb, NULL, &tv);
+ tt_int_op(r, ==, 0);
+
+ if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
+ tt_fail_perror("write");
+ }
+
+ shutdown(data->pair[1], SHUT_WR);
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(called, ==, 2101);
+end:
+ ;
+}
+
+static void
+test_event_once_never(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct timeval tv;
+
+ /* Have one trigger in 10 seconds (it never fires; we exit the loop first) */

+ tv.tv_sec = 10;
+ tv.tv_usec = 0;
+ called = 0;
+ event_base_once(data->base, -1, EV_TIMEOUT,
+ timeout_called_once_cb, NULL, &tv);
+
+ /* But shut down the base in 75 msec. */
+ tv.tv_sec = 0;
+ tv.tv_usec = 75*1000;
+ event_base_loopexit(data->base, &tv);
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(called, ==, 0);
+end:
+ ;
+}
+
+static void
+test_event_pending(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event *r=NULL, *w=NULL, *t=NULL;
+ struct timeval tv, now, tv2;
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 500 * 1000;
+ r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
+ NULL);
+ w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
+ NULL);
+ t = evtimer_new(data->base, timeout_cb, NULL);
+
+ tt_assert(r);
+ tt_assert(w);
+ tt_assert(t);
+
+ evutil_gettimeofday(&now, NULL);
+ event_add(r, NULL);
+ event_add(t, &tv);
+
+ tt_assert( event_pending(r, EV_READ, NULL));
+ tt_assert(!event_pending(w, EV_WRITE, NULL));
+ tt_assert(!event_pending(r, EV_WRITE, NULL));
+ tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
+ tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
+ tt_assert( event_pending(t, EV_TIMEOUT, NULL));
+ tt_assert( event_pending(t, EV_TIMEOUT, &tv2));
+
+ tt_assert(evutil_timercmp(&tv2, &now, >));
+
+ test_timeval_diff_eq(&now, &tv2, 500);
+
+end:
+ if (r) {
+ event_del(r);
+ event_free(r);
+ }
+ if (w) {
+ event_del(w);
+ event_free(w);
+ }
+ if (t) {
+ event_del(t);
+ event_free(t);
+ }
+}
+
+#ifndef _WIN32
+/* You can't do this test on windows, since dup2 doesn't work on sockets */
+
+static void
+dfd_cb(evutil_socket_t fd, short e, void *data)
+{
+ *(int*)data = (int)e;
+}
+
+/* Regression test for our workaround for a fun epoll/linux related bug
+ * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
+ * will get you an EEXIST */
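+/* (Roughly: epoll keys its interest list on the open file description, not
+ * the fd number, and closing dfd doesn't remove the entry while fd still
+ * refers to the same description. After dup2() recreates dfd, a plain
+ * EPOLL_CTL_ADD therefore reports EEXIST and libevent has to retry with
+ * EPOLL_CTL_MOD.) */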
+static void
+test_dup_fd(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct event *ev1=NULL, *ev2=NULL;
+ int fd, dfd=-1;
+ int ev1_got, ev2_got;
+
+ tt_int_op(write(data->pair[0], "Hello world",
+ strlen("Hello world")), >, 0);
+ fd = data->pair[1];
+
+ dfd = dup(fd);
+ tt_int_op(dfd, >=, 0);
+
+ ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got);
+ ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
+ ev1_got = ev2_got = 0;
+ event_add(ev1, NULL);
+ event_add(ev2, NULL);
+ event_base_loop(base, EVLOOP_ONCE);
+ tt_int_op(ev1_got, ==, EV_READ);
+ tt_int_op(ev2_got, ==, EV_READ);
+
+ /* Now close and delete dfd then dispatch. We need to do the
+ * dispatch here so that when we add it later, we think there
+ * was an intermediate delete. */
+ close(dfd);
+ event_del(ev2);
+ ev1_got = ev2_got = 0;
+ event_base_loop(base, EVLOOP_ONCE);
+ tt_want_int_op(ev1_got, ==, EV_READ);
+ tt_int_op(ev2_got, ==, 0);
+
+ /* Re-duplicate the fd. We need to get the same duplicated
+ * value that we closed to provoke the epoll quirk. Also, we
+ * need to change the events to write, or else the old lingering
+ * read event will make the test pass whether the change was
+ * successful or not. */
+ tt_int_op(dup2(fd, dfd), ==, dfd);
+ event_free(ev2);
+ ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
+ event_add(ev2, NULL);
+ ev1_got = ev2_got = 0;
+ event_base_loop(base, EVLOOP_ONCE);
+ tt_want_int_op(ev1_got, ==, EV_READ);
+ tt_int_op(ev2_got, ==, EV_WRITE);
+
+end:
+ if (ev1)
+ event_free(ev1);
+ if (ev2)
+ event_free(ev2);
+ if (dfd >= 0)
+ close(dfd);
+}
+#endif
+
+#ifdef EVENT__DISABLE_MM_REPLACEMENT
+static void
+test_mm_functions(void *arg)
+{
+ tinytest_set_test_skipped_();
+}
+#else
+static int
+check_dummy_mem_ok(void *mem_)
+{
+ char *mem = mem_;
+ mem -= 16;
+ return !memcmp(mem, "{[<guardedram>]}", 16);
+}
+
+static void *
+dummy_malloc(size_t len)
+{
+ char *mem = malloc(len+16);
+ memcpy(mem, "{[<guardedram>]}", 16);
+ return mem+16;
+}
+
+static void *
+dummy_realloc(void *mem_, size_t len)
+{
+ char *mem = mem_;
+ if (!mem)
+ return dummy_malloc(len);
+ tt_want(check_dummy_mem_ok(mem_));
+ mem -= 16;
+ mem = realloc(mem, len+16);
+ return mem+16;
+}
+
+static void
+dummy_free(void *mem_)
+{
+ char *mem = mem_;
+ tt_want(check_dummy_mem_ok(mem_));
+ mem -= 16;
+ free(mem);
+}
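+
+/* The replacement allocators above prefix every allocation with the 16-byte
+ * "{[<guardedram>]}" tag and return a pointer just past it, so
+ * check_dummy_mem_ok() can tell whether a pointer really came from them.
+ * That is how the test below proves the base was allocated through
+ * event_set_mem_functions(). */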
+
+static void
+test_mm_functions(void *arg)
+{
+ struct event_base *b = NULL;
+ struct event_config *cfg = NULL;
+ event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
+ cfg = event_config_new();
+ event_config_avoid_method(cfg, "Nonesuch");
+ b = event_base_new_with_config(cfg);
+ tt_assert(b);
+ tt_assert(check_dummy_mem_ok(b));
+end:
+ if (cfg)
+ event_config_free(cfg);
+ if (b)
+ event_base_free(b);
+}
+#endif
+
+static void
+many_event_cb(evutil_socket_t fd, short event, void *arg)
+{
+ int *calledp = arg;
+ *calledp += 1;
+}
+
+static void
+test_many_events(void *arg)
+{
+ /* Try 70 events that should all be ready at once. This will
+ * exercise the "resize" code on most of the backends, and will make
+ * sure that we can get past the 64-handle limit of some windows
+ * functions. */
+#define MANY 70
+
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ int one_at_a_time = data->setup_data != NULL;
+ evutil_socket_t sock[MANY];
+ struct event *ev[MANY];
+ int called[MANY];
+ int i;
+ int loopflags = EVLOOP_NONBLOCK, evflags=0;
+ if (one_at_a_time) {
+ loopflags |= EVLOOP_ONCE;
+ evflags = EV_PERSIST;
+ }
+
+ memset(sock, 0xff, sizeof(sock));
+ memset(ev, 0, sizeof(ev));
+ memset(called, 0, sizeof(called));
+
+ for (i = 0; i < MANY; ++i) {
+ /* We need an event that will hit the backend, and that will
+ * be ready immediately. "Send a datagram" is an easy
+ * instance of that. */
+ sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
+ tt_assert(sock[i] >= 0);
+ called[i] = 0;
+ ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
+ many_event_cb, &called[i]);
+ event_add(ev[i], NULL);
+ if (one_at_a_time)
+ event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
+ }
+
+ event_base_loop(base, loopflags);
+
+ for (i = 0; i < MANY; ++i) {
+ if (one_at_a_time)
+ tt_int_op(called[i], ==, MANY - i + 1);
+ else
+ tt_int_op(called[i], ==, 1);
+ }
+
+end:
+ for (i = 0; i < MANY; ++i) {
+ if (ev[i])
+ event_free(ev[i]);
+ if (sock[i] >= 0)
+ evutil_closesocket(sock[i]);
+ }
+#undef MANY
+}
+
+static void
+test_struct_event_size(void *arg)
+{
+ tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
+end:
+ ;
+}
+
+static void
+test_get_assignment(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct event *ev1 = NULL;
+ const char *str = "foo";
+
+ struct event_base *b;
+ evutil_socket_t s;
+ short what;
+ event_callback_fn cb;
+ void *cb_arg;
+
+ ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str);
+ event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);
+
+ tt_ptr_op(b, ==, base);
+ tt_int_op(s, ==, data->pair[1]);
+ tt_int_op(what, ==, EV_READ);
+ tt_ptr_op(cb, ==, dummy_read_cb);
+ tt_ptr_op(cb_arg, ==, str);
+
+ /* Now make sure this doesn't crash. */
+ event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);
+
+end:
+ if (ev1)
+ event_free(ev1);
+}
+
+struct foreach_helper {
+ int count;
+ const struct event *ev;
+};
+
+static int
+foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
+{
+ struct foreach_helper *h = event_get_callback_arg(ev);
+ struct timeval *tv = arg;
+ if (event_get_callback(ev) != timeout_cb)
+ return 0;
+ tt_ptr_op(event_get_base(ev), ==, base);
+ tt_int_op(tv->tv_sec, ==, 10);
+ h->ev = ev;
+ h->count++;
+ return 0;
+end:
+ return -1;
+}
+
+static int
+foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
+{
+ const struct event **ev_out = arg;
+ struct foreach_helper *h = event_get_callback_arg(ev);
+ if (event_get_callback(ev) != timeout_cb)
+ return 0;
+ if (h->count == 99) {
+ *ev_out = ev;
+ return 101;
+ }
+ return 0;
+}
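+
+/* A nonzero return from the callback stops the iteration, and (as the
+ * assertions below rely on) event_base_foreach_event() passes that value
+ * back to its caller; -1 is returned up front for bad arguments. */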
+
+static void
+test_event_foreach(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct event *ev[5];
+ struct foreach_helper visited[5];
+ int i;
+ struct timeval ten_sec = {10,0};
+ const struct event *ev_found = NULL;
+
+ for (i = 0; i < 5; ++i) {
+ visited[i].count = 0;
+ visited[i].ev = NULL;
+ ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
+ }
+
+ tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
+ tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));
+
+ event_add(ev[0], &ten_sec);
+ event_add(ev[1], &ten_sec);
+ event_active(ev[1], EV_TIMEOUT, 1);
+ event_active(ev[2], EV_TIMEOUT, 1);
+ event_add(ev[3], &ten_sec);
+ /* Don't touch ev[4]. */
+
+ tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
+ &ten_sec));
+ tt_int_op(1, ==, visited[0].count);
+ tt_int_op(1, ==, visited[1].count);
+ tt_int_op(1, ==, visited[2].count);
+ tt_int_op(1, ==, visited[3].count);
+ tt_ptr_op(ev[0], ==, visited[0].ev);
+ tt_ptr_op(ev[1], ==, visited[1].ev);
+ tt_ptr_op(ev[2], ==, visited[2].ev);
+ tt_ptr_op(ev[3], ==, visited[3].ev);
+
+ visited[2].count = 99;
+ tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
+ &ev_found));
+ tt_ptr_op(ev_found, ==, ev[2]);
+
+end:
+ for (i=0; i<5; ++i) {
+ event_free(ev[i]);
+ }
+}
+
+static struct event_base *cached_time_base = NULL;
+static int cached_time_reset = 0;
+static int cached_time_sleep = 0;
+static void
+cache_time_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct timeval *tv = arg;
+ tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
+ if (cached_time_sleep) {
+ struct timeval delay = { 0, 30*1000 };
+ evutil_usleep_(&delay);
+ }
+ if (cached_time_reset) {
+ event_base_update_cache_time(cached_time_base);
+ }
+end:
+ ;
+}
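+
+/* The base normally takes one timestamp per pass over the active callbacks,
+ * so all three events below should observe the same cached time even when a
+ * callback sleeps, unless the test disables caching with
+ * EVENT_BASE_FLAG_NO_CACHE_TIME or refreshes it explicitly with
+ * event_base_update_cache_time(). */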
+
+static void
+test_gettimeofday_cached(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_config *cfg = NULL;
+ struct event_base *base = NULL;
+ struct timeval tv1, tv2, tv3, now;
+ struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
+ int cached_time_disable = strstr(data->setup_data, "disable") != NULL;
+
+ cfg = event_config_new();
+ if (cached_time_disable) {
+ event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
+ }
+ cached_time_base = base = event_base_new_with_config(cfg);
+ tt_assert(base);
+
+ /* Try gettimeofday_cached outside of an event loop. */
+ evutil_gettimeofday(&now, NULL);
+ tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
+ tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
+ tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
+ tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);
+
+ cached_time_reset = strstr(data->setup_data, "reset") != NULL;
+ cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;
+
+ ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
+ ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
+ ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);
+
+ event_active(ev1, EV_TIMEOUT, 1);
+ event_active(ev2, EV_TIMEOUT, 1);
+ event_active(ev3, EV_TIMEOUT, 1);
+
+ event_base_dispatch(base);
+
+ if (cached_time_reset && cached_time_sleep) {
+ tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
+ tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
+ } else if (cached_time_disable && cached_time_sleep) {
+ tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
+ tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
+ } else if (! cached_time_disable) {
+ tt_assert(evutil_timercmp(&tv1, &tv2, ==));
+ tt_assert(evutil_timercmp(&tv2, &tv3, ==));
+ }
+
+end:
+ if (ev1)
+ event_free(ev1);
+ if (ev2)
+ event_free(ev2);
+ if (ev3)
+ event_free(ev3);
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
+}
+
+static void
+tabf_cb(evutil_socket_t fd, short what, void *arg)
+{
+ int *ptr = arg;
+ *ptr = what;
+ *ptr += 0x10000;
+}
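+
+/* tabf_cb records the flags it was invoked with and adds 0x10000, so the
+ * checks below can distinguish "ran with these flags" from "never ran",
+ * which leaves the counter at 0. */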
+
+static void
+test_active_by_fd(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
+ int e1,e2,e3,e4;
+#ifndef _WIN32
+ struct event *evsig = NULL;
+ int es;
+#endif
+ struct timeval tenmin = { 600, 0 };
+
+ /* Ensure no crash on nonexistent FD. */
+ event_base_active_by_fd(base, 1000, EV_READ);
+
+ /* Ensure no crash on bogus FD. */
+ event_base_active_by_fd(base, -1, EV_READ);
+
+ /* Ensure no crash on nonexistent/bogus signal. */
+ event_base_active_by_signal(base, 1000);
+ event_base_active_by_signal(base, -1);
+
+ event_base_assert_ok_(base);
+
+ e1 = e2 = e3 = e4 = 0;
+ ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
+ ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
+ ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
+ ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
+ tt_assert(ev1);
+ tt_assert(ev2);
+ tt_assert(ev3);
+ tt_assert(ev4);
+#ifndef _WIN32
+ evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
+ tt_assert(evsig);
+ event_add(evsig, &tenmin);
+#endif
+
+ event_add(ev1, &tenmin);
+ event_add(ev2, NULL);
+ event_add(ev3, NULL);
+ event_add(ev4, &tenmin);
+
+
+ event_base_assert_ok_(base);
+
+ /* Trigger 2, 3, 4 */
+ event_base_active_by_fd(base, data->pair[0], EV_WRITE);
+ event_base_active_by_fd(base, data->pair[1], EV_READ);
+#ifndef _WIN32
+ event_base_active_by_signal(base, SIGHUP);
+#endif
+
+ event_base_assert_ok_(base);
+
+ event_base_loop(base, EVLOOP_ONCE);
+
+ tt_int_op(e1, ==, 0);
+ tt_int_op(e2, ==, EV_WRITE | 0x10000);
+ tt_int_op(e3, ==, EV_READ | 0x10000);
+ /* Mask out EV_WRITE here, since it could be genuinely writeable. */
+ tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000);
+#ifndef _WIN32
+ tt_int_op(es, ==, EV_SIGNAL | 0x10000);
+#endif
+
+end:
+ if (ev1)
+ event_free(ev1);
+ if (ev2)
+ event_free(ev2);
+ if (ev3)
+ event_free(ev3);
+ if (ev4)
+ event_free(ev4);
+#ifndef _WIN32
+ if (evsig)
+ event_free(evsig);
+#endif
+}
+
+struct testcase_t main_testcases[] = {
+ /* Some converted-over tests */
+ { "methods", test_methods, TT_FORK, NULL, NULL },
+ { "version", test_version, 0, NULL, NULL },
+ BASIC(base_features, TT_FORK|TT_NO_LOGS),
+ { "base_environ", test_base_environ, TT_FORK, NULL, NULL },
+
+ BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
+ BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),
+
+ BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
+ BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
+ BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
+ BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
+ BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),
+
+ BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
+ BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
+ BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
+ BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
+
+ /* These are still using the old API */
+ LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
+ { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "persistent_active_timeout", test_persistent_active_timeout,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ LEGACY(priorities, TT_FORK|TT_NEED_BASE),
+ BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
+ { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
+ &basic_setup, NULL },
+
+ /* These legacy tests may not all need all of these flags. */
+ LEGACY(simpleread, TT_ISOLATED),
+ LEGACY(simpleread_multiple, TT_ISOLATED),
+ LEGACY(simplewrite, TT_ISOLATED),
+ { "simpleclose", test_simpleclose, TT_FORK, &basic_setup,
+ NULL },
+ LEGACY(multiple, TT_ISOLATED),
+ LEGACY(persistent, TT_ISOLATED),
+ LEGACY(combined, TT_ISOLATED),
+ LEGACY(simpletimeout, TT_ISOLATED),
+ LEGACY(loopbreak, TT_ISOLATED),
+ LEGACY(loopexit, TT_ISOLATED),
+ LEGACY(loopexit_multiple, TT_ISOLATED),
+ LEGACY(nonpersist_readd, TT_ISOLATED),
+ LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
+ LEGACY(want_only_once, TT_ISOLATED),
+ { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
+ { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
+ { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
+ NULL },
+#ifndef _WIN32
+ { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
+#endif
+ { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
+ { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
+ { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },
+
+ { "struct_event_size", test_struct_event_size, 0, NULL, NULL },
+ BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
+
+ BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
+ { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" },
+ { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" },
+ { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" },
+ { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" },
+ { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" },
+
+ BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
+
+#ifndef _WIN32
+ LEGACY(fork, TT_ISOLATED),
+#endif
+#ifdef EVENT__HAVE_PTHREADS
+ /** TODO: support win32 */
+ LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS),
+#endif
+
+ END_OF_TESTCASES
+};
+
+struct testcase_t evtag_testcases[] = {
+ { "int", evtag_int_test, TT_FORK, NULL, NULL },
+ { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
+ { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
+ { "peek", evtag_test_peek, 0, NULL, NULL },
+
+ END_OF_TESTCASES
+};
+
+struct testcase_t signal_testcases[] = {
+#ifndef _WIN32
+ LEGACY(simplestsignal, TT_ISOLATED),
+ LEGACY(simplesignal, TT_ISOLATED),
+ LEGACY(multiplesignal, TT_ISOLATED),
+ LEGACY(immediatesignal, TT_ISOLATED),
+ LEGACY(signal_dealloc, TT_ISOLATED),
+ LEGACY(signal_pipeloss, TT_ISOLATED),
+ LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
+ LEGACY(signal_restore, TT_ISOLATED),
+ LEGACY(signal_assert, TT_ISOLATED),
+ LEGACY(signal_while_processing, TT_ISOLATED),
+#endif
+ END_OF_TESTCASES
+};
+
diff --git a/libs/libevent/docs/test/regress.h b/libs/libevent/docs/test/regress.h
new file mode 100644
index 0000000000..de1aed3089
--- /dev/null
+++ b/libs/libevent/docs/test/regress.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef REGRESS_H_INCLUDED_
+#define REGRESS_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+extern struct testcase_t main_testcases[];
+extern struct testcase_t evtag_testcases[];
+extern struct testcase_t evbuffer_testcases[];
+extern struct testcase_t finalize_testcases[];
+extern struct testcase_t bufferevent_testcases[];
+extern struct testcase_t bufferevent_iocp_testcases[];
+extern struct testcase_t util_testcases[];
+extern struct testcase_t signal_testcases[];
+extern struct testcase_t http_testcases[];
+extern struct testcase_t dns_testcases[];
+extern struct testcase_t rpc_testcases[];
+extern struct testcase_t edgetriggered_testcases[];
+extern struct testcase_t minheap_testcases[];
+extern struct testcase_t iocp_testcases[];
+extern struct testcase_t ssl_testcases[];
+extern struct testcase_t listener_testcases[];
+extern struct testcase_t listener_iocp_testcases[];
+extern struct testcase_t thread_testcases[];
+
+extern struct evutil_weakrand_state test_weakrand_state;
+
+#define test_weakrand() (evutil_weakrand_(&test_weakrand_state))
+
+void regress_threads(void *);
+void test_bufferevent_zlib(void *);
+
+/* Helpers to wrap old testcases */
+extern evutil_socket_t pair[2];
+extern int test_ok;
+extern int called;
+extern struct event_base *global_base;
+extern int in_legacy_test_wrapper;
+
+int regress_make_tmpfile(const void *data, size_t datalen, char **filename_out);
+
+struct basic_test_data {
+ struct event_base *base;
+ evutil_socket_t pair[2];
+
+ void (*legacy_test_fn)(void);
+
+ void *setup_data;
+};
+extern const struct testcase_setup_t basic_setup;
+
+
+extern const struct testcase_setup_t legacy_setup;
+void run_legacy_test_fn(void *ptr);
+
+extern int libevent_tests_running_in_debug_mode;
+
+/* A couple of flags that basic/legacy_setup can support. */
+#define TT_NEED_SOCKETPAIR TT_FIRST_USER_FLAG
+#define TT_NEED_BASE (TT_FIRST_USER_FLAG<<1)
+#define TT_NEED_DNS (TT_FIRST_USER_FLAG<<2)
+#define TT_LEGACY (TT_FIRST_USER_FLAG<<3)
+#define TT_NEED_THREADS (TT_FIRST_USER_FLAG<<4)
+#define TT_NO_LOGS (TT_FIRST_USER_FLAG<<5)
+#define TT_ENABLE_IOCP_FLAG (TT_FIRST_USER_FLAG<<6)
+#define TT_ENABLE_IOCP (TT_ENABLE_IOCP_FLAG|TT_NEED_THREADS)
+
+/* All the flags that a legacy test needs. */
+#define TT_ISOLATED TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE
+
+
+#define BASIC(name,flags) \
+ { #name, test_## name, flags, &basic_setup, NULL }
+
+#define LEGACY(name,flags) \
+ { #name, run_legacy_test_fn, flags|TT_LEGACY, &legacy_setup, \
+ test_## name }
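+
+/* For instance, BASIC(version, 0) expands to
+ * { "version", test_version, 0, &basic_setup, NULL },
+ * matching the shape of the entries written out by hand in main_testcases. */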
+
+struct evutil_addrinfo;
+struct evutil_addrinfo *ai_find_by_family(struct evutil_addrinfo *ai, int f);
+struct evutil_addrinfo *ai_find_by_protocol(struct evutil_addrinfo *ai, int p);
+int test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
+ int socktype, int protocol, int line);
+
+#define test_ai_eq(ai, str, s, p) do { \
+ if (test_ai_eq_((ai), (str), (s), (p), __LINE__)<0) \
+ goto end; \
+ } while (0)
+
+#define test_timeval_diff_leq(tv1, tv2, diff, tolerance) \
+ tt_int_op(labs(timeval_msec_diff((tv1), (tv2)) - diff), <=, tolerance)
+
+#define test_timeval_diff_eq(tv1, tv2, diff) \
+ test_timeval_diff_leq((tv1), (tv2), (diff), 50)
+
+long timeval_msec_diff(const struct timeval *start, const struct timeval *end);
+
+#ifndef _WIN32
+pid_t regress_fork(void);
+#endif
+
+#ifdef EVENT__HAVE_OPENSSL
+#include <openssl/ssl.h>
+EVP_PKEY *ssl_getkey(void);
+X509 *ssl_getcert(void);
+SSL_CTX *get_ssl_ctx(void);
+void init_ssl(void);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* REGRESS_H_INCLUDED_ */
diff --git a/libs/libevent/docs/test/regress.rpc b/libs/libevent/docs/test/regress.rpc
new file mode 100644
index 0000000000..0ee904e913
--- /dev/null
+++ b/libs/libevent/docs/test/regress.rpc
@@ -0,0 +1,25 @@
+/* tests data packing and unpacking */
+
+struct msg {
+ string /* sender */ from_name = 1; /* be verbose */
+ string to_name = 2;
+ optional struct[kill] attack = 3;
+ array struct[run] run = 4;
+}
+
+struct kill {
+ string weapon = 0x10121;
+ string action = 2;
+ array int how_often = 3;
+}
+
+struct run {
+ string how = 1;
+ optional bytes some_bytes = 2;
+
+ bytes fixed_bytes[24] = 3;
+ array string notes = 4;
+
+ optional int64 large_number = 5;
+ array int other_numbers = 6;
+}
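+
+/* event_rpcgen.py is expected to turn these descriptions into generated C
+ * marshaling code (regress.gen.c / regress.gen.h) that the RPC regression
+ * tests compile against. */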
diff --git a/libs/libevent/docs/test/regress_buffer.c b/libs/libevent/docs/test/regress_buffer.c
new file mode 100644
index 0000000000..957e59f178
--- /dev/null
+++ b/libs/libevent/docs/test/regress_buffer.c
@@ -0,0 +1,2281 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/util.h"
+
+#include "defer-internal.h"
+#include "evbuffer-internal.h"
+#include "log-internal.h"
+
+#include "regress.h"
+
+/* Validates that an evbuffer is good. Returns false if it isn't, true if it
+ * is. */
+static int
+evbuffer_validate_(struct evbuffer *buf)
+{
+ struct evbuffer_chain *chain;
+ size_t sum = 0;
+ int found_last_with_datap = 0;
+
+ if (buf->first == NULL) {
+ tt_assert(buf->last == NULL);
+ tt_assert(buf->total_len == 0);
+ }
+
+ chain = buf->first;
+
+ tt_assert(buf->last_with_datap);
+ if (buf->last_with_datap == &buf->first)
+ found_last_with_datap = 1;
+
+ while (chain != NULL) {
+ if (&chain->next == buf->last_with_datap)
+ found_last_with_datap = 1;
+ sum += chain->off;
+ if (chain->next == NULL) {
+ tt_assert(buf->last == chain);
+ }
+ tt_assert(chain->buffer_len >= chain->misalign + chain->off);
+ chain = chain->next;
+ }
+
+ if (buf->first)
+ tt_assert(*buf->last_with_datap);
+
+ if (*buf->last_with_datap) {
+ chain = *buf->last_with_datap;
+ if (chain->off == 0 || buf->total_len == 0) {
+ tt_assert(chain->off == 0);
+ tt_assert(chain == buf->first);
+ tt_assert(buf->total_len == 0);
+ }
+ chain = chain->next;
+ while (chain != NULL) {
+ tt_assert(chain->off == 0);
+ chain = chain->next;
+ }
+ } else {
+ tt_assert(buf->last_with_datap == &buf->first);
+ }
+ tt_assert(found_last_with_datap);
+
+ tt_assert(sum == buf->total_len);
+ return 1;
+ end:
+ return 0;
+}
+
+static void
+evbuffer_get_waste(struct evbuffer *buf, size_t *allocatedp, size_t *wastedp, size_t *usedp)
+{
+ struct evbuffer_chain *chain;
+ size_t a, w, u;
+ int n = 0;
+ u = a = w = 0;
+
+ chain = buf->first;
+ /* skip empty at start */
+ while (chain && chain->off==0) {
+ ++n;
+ a += chain->buffer_len;
+ chain = chain->next;
+ }
+ /* first nonempty chain: stuff at the end only is wasted. */
+ if (chain) {
+ ++n;
+ a += chain->buffer_len;
+ u += chain->off;
+ if (chain->next && chain->next->off)
+ w += (size_t)(chain->buffer_len - (chain->misalign + chain->off));
+ chain = chain->next;
+ }
+ /* subsequent nonempty chains */
+ while (chain && chain->off) {
+ ++n;
+ a += chain->buffer_len;
+ w += (size_t)chain->misalign;
+ u += chain->off;
+ if (chain->next && chain->next->off)
+ w += (size_t) (chain->buffer_len - (chain->misalign + chain->off));
+ chain = chain->next;
+ }
+ /* subsequent empty chains */
+ while (chain) {
+ ++n;
+ a += chain->buffer_len;
+ chain = chain->next;
+ }
+ *allocatedp = a;
+ *wastedp = w;
+ *usedp = u;
+}
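+
+/* In the helper above, "allocated" is the total capacity of every chain,
+ * "used" is the bytes holding live data, and "wasted" counts misalignment
+ * plus the unused tail of any chain that already has a nonempty successor,
+ * i.e. space that can no longer be filled without reshuffling chains. */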
+
+#define evbuffer_validate(buf) \
+ TT_STMT_BEGIN if (!evbuffer_validate_(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END
+
+static void
+test_evbuffer(void *ptr)
+{
+ static char buffer[512], *tmp;
+ struct evbuffer *evb = evbuffer_new();
+ struct evbuffer *evb_two = evbuffer_new();
+ size_t sz_tmp;
+ int i;
+
+ evbuffer_validate(evb);
+ evbuffer_add_printf(evb, "%s/%d", "hello", 1);
+ evbuffer_validate(evb);
+
+ tt_assert(evbuffer_get_length(evb) == 7);
+ tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "hello/1", 7));
+
+ evbuffer_add_buffer(evb, evb_two);
+ evbuffer_validate(evb);
+
+ evbuffer_drain(evb, strlen("hello/"));
+ evbuffer_validate(evb);
+ tt_assert(evbuffer_get_length(evb) == 1);
+ tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "1", 1));
+
+ evbuffer_add_printf(evb_two, "%s", "/hello");
+ evbuffer_validate(evb);
+ evbuffer_add_buffer(evb, evb_two);
+ evbuffer_validate(evb);
+
+ tt_assert(evbuffer_get_length(evb_two) == 0);
+ tt_assert(evbuffer_get_length(evb) == 7);
+ tt_assert(!memcmp((char*)EVBUFFER_DATA(evb), "1/hello", 7));
+
+ memset(buffer, 0, sizeof(buffer));
+ evbuffer_add(evb, buffer, sizeof(buffer));
+ evbuffer_validate(evb);
+ tt_assert(evbuffer_get_length(evb) == 7 + 512);
+
+ tmp = (char *)evbuffer_pullup(evb, 7 + 512);
+ tt_assert(tmp);
+ tt_assert(!strncmp(tmp, "1/hello", 7));
+ tt_assert(!memcmp(tmp + 7, buffer, sizeof(buffer)));
+ evbuffer_validate(evb);
+
+ evbuffer_prepend(evb, "something", 9);
+ evbuffer_validate(evb);
+ evbuffer_prepend(evb, "else", 4);
+ evbuffer_validate(evb);
+
+ tmp = (char *)evbuffer_pullup(evb, 4 + 9 + 7);
+ tt_assert(!strncmp(tmp, "elsesomething1/hello", 4 + 9 + 7));
+ evbuffer_validate(evb);
+
+ evbuffer_drain(evb, -1);
+ evbuffer_validate(evb);
+ evbuffer_drain(evb_two, -1);
+ evbuffer_validate(evb);
+
+ for (i = 0; i < 3; ++i) {
+ evbuffer_add(evb_two, buffer, sizeof(buffer));
+ evbuffer_validate(evb_two);
+ evbuffer_add_buffer(evb, evb_two);
+ evbuffer_validate(evb);
+ evbuffer_validate(evb_two);
+ }
+
+ tt_assert(evbuffer_get_length(evb_two) == 0);
+ tt_assert(evbuffer_get_length(evb) == i * sizeof(buffer));
+
+ /* test remove buffer */
+ sz_tmp = (size_t)(sizeof(buffer)*2.5);
+ evbuffer_remove_buffer(evb, evb_two, sz_tmp);
+ tt_assert(evbuffer_get_length(evb_two) == sz_tmp);
+ tt_assert(evbuffer_get_length(evb) == sizeof(buffer) / 2);
+ evbuffer_validate(evb);
+
+ if (memcmp(evbuffer_pullup(
+ evb, -1), buffer, sizeof(buffer) / 2) != 0 ||
+ memcmp(evbuffer_pullup(
+ evb_two, -1), buffer, sizeof(buffer)) != 0)
+ tt_abort_msg("Pullup did not preserve content");
+
+ evbuffer_validate(evb);
+
+
+ /* testing one-vector reserve and commit */
+ {
+ struct evbuffer_iovec v[1];
+ char *buf;
+ int i, j, r;
+
+ for (i = 0; i < 3; ++i) {
+ r = evbuffer_reserve_space(evb, 10000, v, 1);
+ tt_int_op(r, ==, 1);
+ tt_assert(v[0].iov_len >= 10000);
+ tt_assert(v[0].iov_base != NULL);
+
+ evbuffer_validate(evb);
+ buf = v[0].iov_base;
+ for (j = 0; j < 10000; ++j) {
+ buf[j] = j;
+ }
+ evbuffer_validate(evb);
+
+ tt_int_op(evbuffer_commit_space(evb, v, 1), ==, 0);
+ evbuffer_validate(evb);
+
+ tt_assert(evbuffer_get_length(evb) >= 10000);
+
+ evbuffer_drain(evb, j * 5000);
+ evbuffer_validate(evb);
+ }
+ }
+
+ end:
+ evbuffer_free(evb);
+ evbuffer_free(evb_two);
+}
+
+static void
+no_cleanup(const void *data, size_t datalen, void *extra)
+{
+}
+
+static void
+test_evbuffer_remove_buffer_with_empty(void *ptr)
+{
+ struct evbuffer *src = evbuffer_new();
+ struct evbuffer *dst = evbuffer_new();
+ char buf[2];
+
+ evbuffer_validate(src);
+ evbuffer_validate(dst);
+
+ /* setup the buffers */
+ /* we need more data in src than we will move later */
+ evbuffer_add_reference(src, buf, sizeof(buf), no_cleanup, NULL);
+ evbuffer_add_reference(src, buf, sizeof(buf), no_cleanup, NULL);
+ /* we need one buffer in dst and one empty buffer at the end */
+ evbuffer_add(dst, buf, sizeof(buf));
+ evbuffer_add_reference(dst, buf, 0, no_cleanup, NULL);
+
+ evbuffer_validate(src);
+ evbuffer_validate(dst);
+
+ /* move three bytes over */
+ evbuffer_remove_buffer(src, dst, 3);
+
+ evbuffer_validate(src);
+ evbuffer_validate(dst);
+
+end:
+ evbuffer_free(src);
+ evbuffer_free(dst);
+}
+
+static void
+test_evbuffer_reserve2(void *ptr)
+{
+ /* Test the two-vector cases of reserve/commit. */
+ struct evbuffer *buf = evbuffer_new();
+ int n, i;
+ struct evbuffer_iovec v[2];
+ size_t remaining;
+ char *cp, *cp2;
+
+ /* First chunk will necessarily be one chunk. Use 512 bytes of it.*/
+ n = evbuffer_reserve_space(buf, 1024, v, 2);
+ tt_int_op(n, ==, 1);
+ tt_int_op(evbuffer_get_length(buf), ==, 0);
+ tt_assert(v[0].iov_base != NULL);
+ tt_int_op(v[0].iov_len, >=, 1024);
+ memset(v[0].iov_base, 'X', 512);
+ cp = v[0].iov_base;
+ remaining = v[0].iov_len - 512;
+ v[0].iov_len = 512;
+ evbuffer_validate(buf);
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 512);
+ evbuffer_validate(buf);
+
+ /* Ask for another same-chunk request, in an existing chunk. Use 8
+ * bytes of it. */
+ n = evbuffer_reserve_space(buf, 32, v, 2);
+ tt_int_op(n, ==, 1);
+ tt_assert(cp + 512 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ memset(v[0].iov_base, 'Y', 8);
+ v[0].iov_len = 8;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 520);
+ remaining -= 8;
+ evbuffer_validate(buf);
+
+ /* Now ask for a request that will be split. Use only one byte of it,
+ though. */
+ n = evbuffer_reserve_space(buf, remaining+64, v, 2);
+ tt_int_op(n, ==, 2);
+ tt_assert(cp + 520 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ tt_assert(v[1].iov_base);
+ tt_assert(v[1].iov_len >= 64);
+ cp2 = v[1].iov_base;
+ memset(v[0].iov_base, 'Z', 1);
+ v[0].iov_len = 1;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 521);
+ remaining -= 1;
+ evbuffer_validate(buf);
+
+ /* Now ask for a request that will be split. Use some of the first
+ * part and some of the second. */
+ n = evbuffer_reserve_space(buf, remaining+64, v, 2);
+ evbuffer_validate(buf);
+ tt_int_op(n, ==, 2);
+ tt_assert(cp + 521 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ tt_assert(v[1].iov_base == cp2);
+ tt_assert(v[1].iov_len >= 64);
+ memset(v[0].iov_base, 'W', 400);
+ v[0].iov_len = 400;
+ memset(v[1].iov_base, 'x', 60);
+ v[1].iov_len = 60;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 2));
+ tt_int_op(evbuffer_get_length(buf), ==, 981);
+ evbuffer_validate(buf);
+
+ /* Now peek to make sure stuff got made how we like. */
+ memset(v,0,sizeof(v));
+ n = evbuffer_peek(buf, -1, NULL, v, 2);
+ tt_int_op(n, ==, 2);
+ tt_int_op(v[0].iov_len, ==, 921);
+ tt_int_op(v[1].iov_len, ==, 60);
+
+ cp = v[0].iov_base;
+ for (i=0; i<512; ++i)
+ tt_int_op(cp[i], ==, 'X');
+ for (i=512; i<520; ++i)
+ tt_int_op(cp[i], ==, 'Y');
+ for (i=520; i<521; ++i)
+ tt_int_op(cp[i], ==, 'Z');
+ for (i=521; i<921; ++i)
+ tt_int_op(cp[i], ==, 'W');
+
+ cp = v[1].iov_base;
+ for (i=0; i<60; ++i)
+ tt_int_op(cp[i], ==, 'x');
+
+end:
+ evbuffer_free(buf);
+}
+
+static void
+test_evbuffer_reserve_many(void *ptr)
+{
+ /* This is a glass-box test to handle expanding a buffer with more
+ * chunks and reallocating chunks as needed */
+ struct evbuffer *buf = evbuffer_new();
+ struct evbuffer_iovec v[8];
+ int n;
+ size_t sz;
+ int add_data = ptr && !strcmp(ptr, "add");
+ int fill_first = ptr && !strcmp(ptr, "fill");
+ char *cp1, *cp2;
+
+ /* When reserving the first chunk, we just allocate it */
+ n = evbuffer_reserve_space(buf, 128, v, 2);
+ evbuffer_validate(buf);
+ tt_int_op(n, ==, 1);
+ tt_assert(v[0].iov_len >= 128);
+ sz = v[0].iov_len;
+ cp1 = v[0].iov_base;
+ if (add_data) {
+ *(char*)v[0].iov_base = 'X';
+ v[0].iov_len = 1;
+ n = evbuffer_commit_space(buf, v, 1);
+ tt_int_op(n, ==, 0);
+ } else if (fill_first) {
+ memset(v[0].iov_base, 'X', v[0].iov_len);
+ n = evbuffer_commit_space(buf, v, 1);
+ tt_int_op(n, ==, 0);
+ n = evbuffer_reserve_space(buf, 128, v, 2);
+ tt_int_op(n, ==, 1);
+ sz = v[0].iov_len;
+ tt_assert(v[0].iov_base != cp1);
+ cp1 = v[0].iov_base;
+ }
+
+ /* Make another chunk get added. */
+ n = evbuffer_reserve_space(buf, sz+128, v, 2);
+ evbuffer_validate(buf);
+ tt_int_op(n, ==, 2);
+ sz = v[0].iov_len + v[1].iov_len;
+ tt_int_op(sz, >=, v[0].iov_len+128);
+ if (add_data) {
+ tt_assert(v[0].iov_base == cp1 + 1);
+ } else {
+ tt_assert(v[0].iov_base == cp1);
+ }
+ cp1 = v[0].iov_base;
+ cp2 = v[1].iov_base;
+
+ /* And a third chunk. */
+ n = evbuffer_reserve_space(buf, sz+128, v, 3);
+ evbuffer_validate(buf);
+ tt_int_op(n, ==, 3);
+ tt_assert(cp1 == v[0].iov_base);
+ tt_assert(cp2 == v[1].iov_base);
+ sz = v[0].iov_len + v[1].iov_len + v[2].iov_len;
+
+ /* Now force a reallocation by asking for more space in only 2
+ * buffers. */
+ n = evbuffer_reserve_space(buf, sz+128, v, 2);
+ evbuffer_validate(buf);
+ if (add_data) {
+ tt_int_op(n, ==, 2);
+ tt_assert(cp1 == v[0].iov_base);
+ } else {
+ tt_int_op(n, ==, 1);
+ }
+
+end:
+ evbuffer_free(buf);
+}
+
+static void
+test_evbuffer_expand(void *ptr)
+{
+ char data[4096];
+ struct evbuffer *buf;
+ size_t a,w,u;
+ void *buffer;
+
+ memset(data, 'X', sizeof(data));
+
+ /* Make sure that expand() works on an empty buffer */
+ buf = evbuffer_new();
+ tt_int_op(evbuffer_expand(buf, 20000), ==, 0);
+ evbuffer_validate(buf);
+ a=w=u=0;
+ evbuffer_get_waste(buf, &a,&w,&u);
+ tt_assert(w == 0);
+ tt_assert(u == 0);
+ tt_assert(a >= 20000);
+ tt_assert(buf->first);
+ tt_assert(buf->first == buf->last);
+ tt_assert(buf->first->off == 0);
+ tt_assert(buf->first->buffer_len >= 20000);
+
+ /* Make sure that expand() works as a no-op when there's enough
+ * contiguous space already. */
+ buffer = buf->first->buffer;
+ evbuffer_add(buf, data, 1024);
+ tt_int_op(evbuffer_expand(buf, 1024), ==, 0);
+ tt_assert(buf->first->buffer == buffer);
+ evbuffer_validate(buf);
+ evbuffer_free(buf);
+
+ /* Make sure that expand() can work by moving misaligned data
+ * when it makes sense to do so. */
+ buf = evbuffer_new();
+ evbuffer_add(buf, data, 400);
+ {
+ int n = (int)(buf->first->buffer_len - buf->first->off - 1);
+ tt_assert(n < (int)sizeof(data));
+ evbuffer_add(buf, data, n);
+ }
+ tt_assert(buf->first == buf->last);
+ tt_assert(buf->first->off == buf->first->buffer_len - 1);
+ evbuffer_drain(buf, buf->first->off - 1);
+ tt_assert(1 == evbuffer_get_length(buf));
+ tt_assert(buf->first->misalign > 0);
+ tt_assert(buf->first->off == 1);
+ buffer = buf->first->buffer;
+ tt_assert(evbuffer_expand(buf, 40) == 0);
+ tt_assert(buf->first == buf->last);
+ tt_assert(buf->first->off == 1);
+ tt_assert(buf->first->buffer == buffer);
+ tt_assert(buf->first->misalign == 0);
+ evbuffer_validate(buf);
+ evbuffer_free(buf);
+
+ /* add, expand, pull-up: This used to crash libevent. */
+ buf = evbuffer_new();
+
+ evbuffer_add(buf, data, sizeof(data));
+ evbuffer_add(buf, data, sizeof(data));
+ evbuffer_add(buf, data, sizeof(data));
+
+ evbuffer_validate(buf);
+ evbuffer_expand(buf, 1024);
+ evbuffer_validate(buf);
+ evbuffer_pullup(buf, -1);
+ evbuffer_validate(buf);
+
+end:
+ evbuffer_free(buf);
+}
+
+
+static int reference_cb_called;
+static void
+reference_cb(const void *data, size_t len, void *extra)
+{
+ tt_str_op(data, ==, "this is what we add as read-only memory.");
+ tt_int_op(len, ==, strlen(data));
+ tt_want(extra == (void *)0xdeadaffe);
+ ++reference_cb_called;
+end:
+ ;
+}
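+
+/* evbuffer_add_reference() stores a pointer to caller-owned memory rather
+ * than copying it; the cleanup callback should run exactly once, when the
+ * buffer no longer needs the referenced data, which is what
+ * reference_cb_called tracks below. */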
+
+static void
+test_evbuffer_reference(void *ptr)
+{
+ struct evbuffer *src = evbuffer_new();
+ struct evbuffer *dst = evbuffer_new();
+ struct evbuffer_iovec v[1];
+ const char *data = "this is what we add as read-only memory.";
+ reference_cb_called = 0;
+
+ tt_assert(evbuffer_add_reference(src, data, strlen(data),
+ reference_cb, (void *)0xdeadaffe) != -1);
+
+ evbuffer_reserve_space(dst, strlen(data), v, 1);
+ tt_assert(evbuffer_remove(src, v[0].iov_base, 10) != -1);
+
+ evbuffer_validate(src);
+ evbuffer_validate(dst);
+
+ /* make sure that we don't write data at the beginning */
+ evbuffer_prepend(src, "aaaaa", 5);
+ evbuffer_validate(src);
+ evbuffer_drain(src, 5);
+
+ tt_assert(evbuffer_remove(src, ((char*)(v[0].iov_base)) + 10,
+ strlen(data) - 10) != -1);
+
+ v[0].iov_len = strlen(data);
+
+ evbuffer_commit_space(dst, v, 1);
+ evbuffer_validate(src);
+ evbuffer_validate(dst);
+
+ tt_int_op(reference_cb_called, ==, 1);
+
+ tt_assert(!memcmp(evbuffer_pullup(dst, strlen(data)),
+ data, strlen(data)));
+ evbuffer_validate(dst);
+
+ end:
+ evbuffer_free(dst);
+ evbuffer_free(src);
+}
+
+static struct event_base *addfile_test_event_base = NULL;
+static int addfile_test_done_writing = 0;
+static int addfile_test_total_written = 0;
+static int addfile_test_total_read = 0;
+
+static void
+addfile_test_writecb(evutil_socket_t fd, short what, void *arg)
+{
+ struct evbuffer *b = arg;
+ int r;
+ evbuffer_validate(b);
+ while (evbuffer_get_length(b)) {
+ r = evbuffer_write(b, fd);
+ if (r > 0) {
+ addfile_test_total_written += r;
+ TT_BLATHER(("Wrote %d/%d bytes", r, addfile_test_total_written));
+ } else {
+ int e = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_RW_RETRIABLE(e))
+ return;
+ tt_fail_perror("write");
+ event_base_loopexit(addfile_test_event_base,NULL);
+ }
+ evbuffer_validate(b);
+ }
+ addfile_test_done_writing = 1;
+ return;
+end:
+ event_base_loopexit(addfile_test_event_base,NULL);
+}
+
+static void
+addfile_test_readcb(evutil_socket_t fd, short what, void *arg)
+{
+ struct evbuffer *b = arg;
+ int e, r = 0;
+ do {
+ r = evbuffer_read(b, fd, 1024);
+ if (r > 0) {
+ addfile_test_total_read += r;
+ TT_BLATHER(("Read %d/%d bytes", r, addfile_test_total_read));
+ }
+ } while (r > 0);
+ if (r < 0) {
+ e = evutil_socket_geterror(fd);
+ if (! EVUTIL_ERR_RW_RETRIABLE(e)) {
+ tt_fail_perror("read");
+ event_base_loopexit(addfile_test_event_base,NULL);
+ }
+ }
+ if (addfile_test_done_writing &&
+ addfile_test_total_read >= addfile_test_total_written) {
+ event_base_loopexit(addfile_test_event_base,NULL);
+ }
+}
+
+static void
+test_evbuffer_add_file(void *ptr)
+{
+ struct basic_test_data *testdata = ptr;
+ const char *impl = testdata->setup_data;
+ struct evbuffer *src = evbuffer_new(), *dest = evbuffer_new();
+ char *tmpfilename = NULL;
+ char *data = NULL;
+ const char *expect_data;
+ size_t datalen, expect_len;
+ const char *compare;
+ int fd = -1;
+ int want_ismapping = -1, want_cansendfile = -1;
+ unsigned flags = 0;
+ int use_segment = 1, use_bigfile = 0, map_from_offset = 0,
+ view_from_offset = 0;
+ struct evbuffer_file_segment *seg = NULL;
+ ev_off_t starting_offset = 0, mapping_len = -1;
+ ev_off_t segment_offset = 0, segment_len = -1;
+ struct event *rev=NULL, *wev=NULL;
+ struct event_base *base = testdata->base;
+ evutil_socket_t pair[2] = {-1, -1};
+ struct evutil_weakrand_state seed = { 123456789U };
+
+ /* This test is highly parameterized based on substrings of its
+ * argument. The strings are: */
+ tt_assert(impl);
+ if (strstr(impl, "nosegment")) {
+ /* If nosegment is set, use the older evbuffer_add_file
+ * interface */
+ use_segment = 0;
+ }
+ if (strstr(impl, "bigfile")) {
+ /* If bigfile is set, use a 512K file. Else use a smaller
+ * one. */
+ use_bigfile = 1;
+ }
+ if (strstr(impl, "map_offset")) {
+ /* If map_offset is set, we build the file segment starting
+ * from a point other than byte 0 and ending somewhere other
+ * than the last byte. Otherwise we map the whole thing */
+ map_from_offset = 1;
+ }
+ if (strstr(impl, "offset_in_segment")) {
+ /* If offset_in_segment is set, we add a subsection of the
+ * file segment starting from a point other than byte 0 of
+ * the segment. */
+ view_from_offset = 1;
+ }
+ if (strstr(impl, "sendfile")) {
+ /* If sendfile is set, we try to use a sendfile/splice style
+ * backend. */
+ flags = EVBUF_FS_DISABLE_MMAP;
+ want_cansendfile = 1;
+ want_ismapping = 0;
+ } else if (strstr(impl, "mmap")) {
+ /* If mmap is set, we try to use a mmap/CreateFileMapping
+ * style backend. */
+ flags = EVBUF_FS_DISABLE_SENDFILE;
+ want_ismapping = 1;
+ want_cansendfile = 0;
+ } else if (strstr(impl, "linear")) {
+ /* If linear is set, we try to use a read-the-whole-thing
+ * backend. */
+ flags = EVBUF_FS_DISABLE_SENDFILE|EVBUF_FS_DISABLE_MMAP;
+ want_ismapping = 0;
+ want_cansendfile = 0;
+ } else if (strstr(impl, "default")) {
+ /* The caller doesn't care which backend we use. */
+ ;
+ } else {
+ /* The caller must choose a backend. */
+ TT_DIE(("Didn't recognize the implementation"));
+ }
+
+ if (use_bigfile) {
+ unsigned int i;
+ datalen = 1024*512;
+ data = malloc(1024*512);
+ tt_assert(data);
+ for (i = 0; i < datalen; ++i)
+ data[i] = (char)evutil_weakrand_(&seed);
+ } else {
+ data = strdup("here is a relatively small string.");
+ tt_assert(data);
+ datalen = strlen(data);
+ }
+
+ fd = regress_make_tmpfile(data, datalen, &tmpfilename);
+
+ if (map_from_offset) {
+ starting_offset = datalen/4 + 1;
+ mapping_len = datalen / 2 - 1;
+ expect_data = data + starting_offset;
+ expect_len = mapping_len;
+ } else {
+ expect_data = data;
+ expect_len = datalen;
+ }
+ if (view_from_offset) {
+ tt_assert(use_segment); /* Can't do this with add_file*/
+ segment_offset = expect_len / 3;
+ segment_len = expect_len / 2;
+ expect_data = expect_data + segment_offset;
+ expect_len = segment_len;
+ }
+
+ if (use_segment) {
+ seg = evbuffer_file_segment_new(fd, starting_offset,
+ mapping_len, flags);
+ tt_assert(seg);
+ if (want_ismapping >= 0) {
+ if (seg->is_mapping != (unsigned)want_ismapping)
+ tt_skip();
+ }
+ if (want_cansendfile >= 0) {
+ if (seg->can_sendfile != (unsigned)want_cansendfile)
+ tt_skip();
+ }
+ }
+
+ /* Say that it drains to a fd so that we can use sendfile. */
+ evbuffer_set_flags(src, EVBUFFER_FLAG_DRAINS_TO_FD);
+
+#if defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
+ /* We need to use a pair of AF_INET sockets, since Solaris
+ doesn't support sendfile() over AF_UNIX. */
+ if (evutil_ersatz_socketpair_(AF_INET, SOCK_STREAM, 0, pair) == -1)
+ tt_abort_msg("ersatz_socketpair failed");
+#else
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ tt_abort_msg("socketpair failed");
+#endif
+ evutil_make_socket_nonblocking(pair[0]);
+ evutil_make_socket_nonblocking(pair[1]);
+
+ tt_assert(fd != -1);
+
+ if (use_segment) {
+ tt_assert(evbuffer_add_file_segment(src, seg,
+ segment_offset, segment_len)!=-1);
+ } else {
+ tt_assert(evbuffer_add_file(src, fd, starting_offset,
+ mapping_len) != -1);
+ }
+
+ evbuffer_validate(src);
+
+ addfile_test_event_base = base;
+ wev = event_new(base, pair[0], EV_WRITE|EV_PERSIST,
+ addfile_test_writecb, src);
+ rev = event_new(base, pair[1], EV_READ|EV_PERSIST,
+ addfile_test_readcb, dest);
+
+ event_add(wev, NULL);
+ event_add(rev, NULL);
+ event_base_dispatch(base);
+
+ evbuffer_validate(src);
+ evbuffer_validate(dest);
+
+ tt_assert(addfile_test_done_writing);
+ tt_int_op(addfile_test_total_written, ==, expect_len);
+ tt_int_op(addfile_test_total_read, ==, expect_len);
+
+ compare = (char *)evbuffer_pullup(dest, expect_len);
+ tt_assert(compare != NULL);
+ if (memcmp(compare, expect_data, expect_len)) {
+ tt_abort_msg("Data from add_file differs.");
+ }
+
+ evbuffer_validate(dest);
+ end:
+ if (data)
+ free(data);
+ if (seg)
+ evbuffer_file_segment_free(seg);
+ if (src)
+ evbuffer_free(src);
+ if (dest)
+ evbuffer_free(dest);
+ if (pair[0] >= 0)
+ evutil_closesocket(pair[0]);
+ if (pair[1] >= 0)
+ evutil_closesocket(pair[1]);
+ if (wev)
+ event_free(wev);
+ if (rev)
+ event_free(rev);
+ if (tmpfilename) {
+ unlink(tmpfilename);
+ free(tmpfilename);
+ }
+}
+
+static int file_segment_cleanup_cb_called_count = 0;
+static struct evbuffer_file_segment const* file_segment_cleanup_cb_called_with = NULL;
+static int file_segment_cleanup_cb_called_with_flags = 0;
+static void* file_segment_cleanup_cb_called_with_arg = NULL;
+static void
+file_segment_cleanup_cp(struct evbuffer_file_segment const* seg, int flags, void* arg)
+{
+ ++file_segment_cleanup_cb_called_count;
+ file_segment_cleanup_cb_called_with = seg;
+ file_segment_cleanup_cb_called_with_flags = flags;
+ file_segment_cleanup_cb_called_with_arg = arg;
+}
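+
+/* The cleanup callback registered via evbuffer_file_segment_add_cleanup_cb()
+ * should only run once the segment is truly released, i.e. after both the
+ * explicit evbuffer_file_segment_free() and the freeing of the evbuffer that
+ * still references it; that ordering is what the test below steps through. */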
+
+static void
+test_evbuffer_file_segment_add_cleanup_cb(void* ptr)
+{
+ char *tmpfilename = NULL;
+ int fd = -1;
+ struct evbuffer *evb = NULL;
+ struct evbuffer_file_segment *seg = NULL, *segptr;
+ char const* arg = "token";
+
+ fd = regress_make_tmpfile("file_segment_test_file", 22, &tmpfilename);
+ tt_int_op(fd, >=, 0);
+
+ evb = evbuffer_new();
+ tt_assert(evb);
+
+ segptr = seg = evbuffer_file_segment_new(fd, 0, -1, 0);
+ tt_assert(seg);
+
+ evbuffer_file_segment_add_cleanup_cb(
+ seg, &file_segment_cleanup_cp, (void*)arg);
+
+ tt_assert(fd != -1);
+
+ tt_assert(evbuffer_add_file_segment(evb, seg, 0, -1)!=-1);
+
+ evbuffer_validate(evb);
+
+ tt_int_op(file_segment_cleanup_cb_called_count, ==, 0);
+ evbuffer_file_segment_free(seg);
+ seg = NULL; /* Prevent double-free. */
+
+ tt_int_op(file_segment_cleanup_cb_called_count, ==, 0);
+ evbuffer_free(evb);
+ evb = NULL; /* Prevent double-free. */
+
+ tt_int_op(file_segment_cleanup_cb_called_count, ==, 1);
+ tt_assert(file_segment_cleanup_cb_called_with == segptr);
+ tt_assert(file_segment_cleanup_cb_called_with_flags == 0);
+ tt_assert(file_segment_cleanup_cb_called_with_arg == (void*)arg);
+
+end:
+ if (evb)
+ evbuffer_free(evb);
+ if (seg)
+ evbuffer_file_segment_free(seg);
+ if (tmpfilename) {
+ unlink(tmpfilename);
+ free(tmpfilename);
+ }
+}
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *
+failing_malloc(size_t how_much)
+{
+ errno = ENOMEM;
+ return NULL;
+}
+#endif
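+
+/* failing_malloc is swapped in via event_set_mem_functions() further down so
+ * that a single evbuffer_readln() call fails cleanly with ENOMEM while
+ * leaving the buffer intact for the next read. */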
+
+static void
+test_evbuffer_readln(void *ptr)
+{
+ struct evbuffer *evb = evbuffer_new();
+ struct evbuffer *evb_tmp = evbuffer_new();
+ const char *s;
+ char *cp = NULL;
+ size_t sz;
+
+#define tt_line_eq(content) \
+ TT_STMT_BEGIN \
+ if (!cp || sz != strlen(content) || strcmp(cp, content)) { \
+ TT_DIE(("Wanted %s; got %s [%d]", content, cp, (int)sz)); \
+ } \
+ TT_STMT_END
+
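+ /* End-of-line styles exercised below: EVBUFFER_EOL_ANY (any run of CR and
+ * LF characters), EVBUFFER_EOL_CRLF (LF, optionally preceded by CR),
+ * EVBUFFER_EOL_CRLF_STRICT (CRLF only), EVBUFFER_EOL_LF (LF only), and
+ * EVBUFFER_EOL_NUL (a NUL byte). */
+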
+ /* Test EOL_ANY. */
+ s = "complex silly newline\r\n\n\r\n\n\rmore\0\n";
+ evbuffer_add(evb, s, strlen(s)+2);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+ tt_line_eq("complex silly newline");
+ free(cp);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+ if (!cp || sz != 5 || memcmp(cp, "more\0\0", 6))
+ tt_abort_msg("Not as expected");
+ tt_uint_op(evbuffer_get_length(evb), ==, 0);
+ evbuffer_validate(evb);
+ s = "\nno newline";
+ evbuffer_add(evb, s, strlen(s));
+ free(cp);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+ tt_line_eq("");
+ free(cp);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+ tt_assert(!cp);
+ evbuffer_validate(evb);
+ evbuffer_drain(evb, evbuffer_get_length(evb));
+ tt_assert(evbuffer_get_length(evb) == 0);
+ evbuffer_validate(evb);
+
+ /* Test EOL_CRLF */
+ s = "Line with\rin the middle\nLine with good crlf\r\n\nfinal\n";
+ evbuffer_add(evb, s, strlen(s));
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+ tt_line_eq("Line with\rin the middle");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+ tt_line_eq("Line with good crlf");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+ tt_line_eq("");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+ tt_line_eq("final");
+ s = "x";
+ evbuffer_validate(evb);
+ evbuffer_add(evb, s, 1);
+ evbuffer_validate(evb);
+ free(cp);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+ tt_assert(!cp);
+ evbuffer_validate(evb);
+
+ /* Test CRLF_STRICT */
+ s = " and a bad crlf\nand a good one\r\n\r\nMore\r";
+ evbuffer_add(evb, s, strlen(s));
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("x and a bad crlf\nand a good one");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_assert(!cp);
+ evbuffer_validate(evb);
+ evbuffer_add(evb, "\n", 1);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("More");
+ free(cp);
+ tt_assert(evbuffer_get_length(evb) == 0);
+ evbuffer_validate(evb);
+
+ s = "An internal CR\r is not an eol\r\nNor is a lack of one";
+ evbuffer_add(evb, s, strlen(s));
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("An internal CR\r is not an eol");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_assert(!cp);
+ evbuffer_validate(evb);
+
+ evbuffer_add(evb, "\r\n", 2);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("Nor is a lack of one");
+ free(cp);
+ tt_assert(evbuffer_get_length(evb) == 0);
+ evbuffer_validate(evb);
+
+ /* Test LF */
+ s = "An\rand a nl\n\nText";
+ evbuffer_add(evb, s, strlen(s));
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_line_eq("An\rand a nl");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_line_eq("");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_assert(!cp);
+ free(cp);
+ evbuffer_add(evb, "\n", 1);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_line_eq("Text");
+ free(cp);
+ evbuffer_validate(evb);
+
+ /* Test NUL */
+ tt_int_op(evbuffer_get_length(evb), ==, 0);
+ {
+ char x[] =
+ "NUL\n\0\0"
+ "The all-zeros character which may serve\0"
+ "to accomplish time fill\0and media fill";
+ /* Add all but the final NUL of x. */
+ evbuffer_add(evb, x, sizeof(x)-1);
+ }
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL);
+ tt_line_eq("NUL\n");
+ free(cp);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL);
+ tt_line_eq("");
+ free(cp);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL);
+ tt_line_eq("The all-zeros character which may serve");
+ free(cp);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL);
+ tt_line_eq("to accomplish time fill");
+ free(cp);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_NUL);
+ tt_ptr_op(cp, ==, NULL);
+ evbuffer_drain(evb, -1);
+
+	/* Test CRLF_STRICT across boundaries */
+ s = " and a bad crlf\nand a good one\r";
+ evbuffer_add(evb_tmp, s, strlen(s));
+ evbuffer_validate(evb);
+ evbuffer_add_buffer(evb, evb_tmp);
+ evbuffer_validate(evb);
+ s = "\n\r";
+ evbuffer_add(evb_tmp, s, strlen(s));
+ evbuffer_validate(evb);
+ evbuffer_add_buffer(evb, evb_tmp);
+ evbuffer_validate(evb);
+ s = "\nMore\r";
+ evbuffer_add(evb_tmp, s, strlen(s));
+ evbuffer_validate(evb);
+ evbuffer_add_buffer(evb, evb_tmp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq(" and a bad crlf\nand a good one");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("");
+ free(cp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_assert(!cp);
+ free(cp);
+ evbuffer_validate(evb);
+ evbuffer_add(evb, "\n", 1);
+ evbuffer_validate(evb);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+ tt_line_eq("More");
+ free(cp); cp = NULL;
+ evbuffer_validate(evb);
+ tt_assert(evbuffer_get_length(evb) == 0);
+
+	/* Test handling of a malloc failure */
+ s = "one line\ntwo line\nblue line";
+ evbuffer_add(evb_tmp, s, strlen(s));
+ evbuffer_validate(evb);
+ evbuffer_add_buffer(evb, evb_tmp);
+ evbuffer_validate(evb);
+
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_line_eq("one line");
+ free(cp); cp = NULL;
+ evbuffer_validate(evb);
+
+ /* the next call to readline should fail */
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+ event_set_mem_functions(failing_malloc, realloc, free);
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_assert(cp == NULL);
+ evbuffer_validate(evb);
+
+ /* now we should get the next line back */
+ event_set_mem_functions(malloc, realloc, free);
+#endif
+ cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+ tt_line_eq("two line");
+ free(cp); cp = NULL;
+ evbuffer_validate(evb);
+
+ end:
+ evbuffer_free(evb);
+ evbuffer_free(evb_tmp);
+ if (cp) free(cp);
+}
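+
+/*
+ * Editor's note: an illustrative sketch, not one of the regression tests,
+ * of the evbuffer_readln() idiom exercised above.  The helper name and the
+ * printf are hypothetical; the calls are the same public API used
+ * throughout this file.  evbuffer_readln() heap-allocates each returned
+ * line (so it must be free()d) and returns NULL when no complete line is
+ * buffered yet.
+ */
+#if 0
+static void
+example_read_lines(struct evbuffer *in)
+{
+	char *line;
+	size_t len;
+
+	/* Pull out and consume one complete CRLF-terminated line at a time. */
+	while ((line = evbuffer_readln(in, &len, EVBUFFER_EOL_CRLF)) != NULL) {
+		printf("got %lu bytes: %s\n", (unsigned long)len, line);
+		free(line);
+	}
+}
+#endif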
+
+static void
+test_evbuffer_search_eol(void *ptr)
+{
+ struct evbuffer *buf = evbuffer_new();
+ struct evbuffer_ptr ptr1, ptr2;
+ const char *s;
+ size_t eol_len;
+
+ s = "string! \r\n\r\nx\n";
+ evbuffer_add(buf, s, strlen(s));
+ eol_len = -1;
+ ptr1 = evbuffer_search_eol(buf, NULL, &eol_len, EVBUFFER_EOL_CRLF);
+ tt_int_op(ptr1.pos, ==, 8);
+ tt_int_op(eol_len, ==, 2);
+
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF);
+ tt_int_op(ptr2.pos, ==, 8);
+ tt_int_op(eol_len, ==, 2);
+
+ evbuffer_ptr_set(buf, &ptr1, 1, EVBUFFER_PTR_ADD);
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF);
+ tt_int_op(ptr2.pos, ==, 9);
+ tt_int_op(eol_len, ==, 1);
+
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_CRLF_STRICT);
+ tt_int_op(ptr2.pos, ==, 10);
+ tt_int_op(eol_len, ==, 2);
+
+ eol_len = -1;
+ ptr1 = evbuffer_search_eol(buf, NULL, &eol_len, EVBUFFER_EOL_LF);
+ tt_int_op(ptr1.pos, ==, 9);
+ tt_int_op(eol_len, ==, 1);
+
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF);
+ tt_int_op(ptr2.pos, ==, 9);
+ tt_int_op(eol_len, ==, 1);
+
+ evbuffer_ptr_set(buf, &ptr1, 1, EVBUFFER_PTR_ADD);
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF);
+ tt_int_op(ptr2.pos, ==, 11);
+ tt_int_op(eol_len, ==, 1);
+
+ tt_assert(evbuffer_ptr_set(buf, &ptr1, evbuffer_get_length(buf), EVBUFFER_PTR_SET) == 0);
+ eol_len = -1;
+ ptr2 = evbuffer_search_eol(buf, &ptr1, &eol_len, EVBUFFER_EOL_LF);
+ tt_int_op(ptr2.pos, ==, -1);
+ tt_int_op(eol_len, ==, 0);
+
+end:
+ evbuffer_free(buf);
+}
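+
+/*
+ * Editor's note: a hedged sketch (not a test case) of how
+ * evbuffer_search_eol() is typically used to walk line endings in place,
+ * which is the behaviour the assertions above pin down: a pos of -1 means
+ * "no EOL found", and eol_len reports how long the matched terminator is.
+ * The helper name is hypothetical; only public libevent calls are used.
+ */
+#if 0
+static int
+example_count_lines(struct evbuffer *buf)
+{
+	struct evbuffer_ptr it;
+	size_t eol_len = 0;
+	int n = 0;
+
+	it = evbuffer_search_eol(buf, NULL, &eol_len, EVBUFFER_EOL_CRLF);
+	while (it.pos != -1) {
+		++n;
+		/* Step just past the EOL we found, then search again. */
+		if (evbuffer_ptr_set(buf, &it, eol_len, EVBUFFER_PTR_ADD) < 0)
+			break;
+		it = evbuffer_search_eol(buf, &it, &eol_len, EVBUFFER_EOL_CRLF);
+	}
+	return n;
+}
+#endif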
+
+static void
+test_evbuffer_iterative(void *ptr)
+{
+ struct evbuffer *buf = evbuffer_new();
+ const char *abc = "abcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyzabcdefghijklmnopqrstvuwxyz";
+ unsigned i, j, sum, n;
+
+ sum = 0;
+ n = 0;
+ for (i = 0; i < 1000; ++i) {
+ for (j = 1; j < strlen(abc); ++j) {
+ char format[32];
+ evutil_snprintf(format, sizeof(format), "%%%u.%us", j, j);
+ evbuffer_add_printf(buf, format, abc);
+
+ /* Only check for rep violations every so often.
+ Walking over the whole list of chains can get
+ pretty expensive as it gets long.
+ */
+ if ((n % 337) == 0)
+ evbuffer_validate(buf);
+
+ sum += j;
+ n++;
+ }
+ }
+ evbuffer_validate(buf);
+
+ tt_uint_op(sum, ==, evbuffer_get_length(buf));
+
+ {
+ size_t a,w,u;
+ a=w=u=0;
+ evbuffer_get_waste(buf, &a, &w, &u);
+ if (0)
+ printf("Allocated: %u.\nWasted: %u.\nUsed: %u.",
+ (unsigned)a, (unsigned)w, (unsigned)u);
+ tt_assert( ((double)w)/a < .125);
+ }
+ end:
+ evbuffer_free(buf);
+
+}
+
+static void
+test_evbuffer_find(void *ptr)
+{
+ unsigned char* p;
+ const char* test1 = "1234567890\r\n";
+ const char* test2 = "1234567890\r";
+#define EVBUFFER_INITIAL_LENGTH 256
+ char test3[EVBUFFER_INITIAL_LENGTH];
+ unsigned int i;
+ struct evbuffer * buf = evbuffer_new();
+
+ tt_assert(buf);
+
+ /* make sure evbuffer_find doesn't match past the end of the buffer */
+ evbuffer_add(buf, (unsigned char*)test1, strlen(test1));
+ evbuffer_validate(buf);
+ evbuffer_drain(buf, strlen(test1));
+ evbuffer_validate(buf);
+ evbuffer_add(buf, (unsigned char*)test2, strlen(test2));
+ evbuffer_validate(buf);
+ p = evbuffer_find(buf, (unsigned char*)"\r\n", 2);
+ tt_want(p == NULL);
+
+ /*
+ * drain the buffer and do another find; in r309 this would
+ * read past the allocated buffer causing a valgrind error.
+ */
+ evbuffer_drain(buf, strlen(test2));
+ evbuffer_validate(buf);
+ for (i = 0; i < EVBUFFER_INITIAL_LENGTH; ++i)
+ test3[i] = 'a';
+ test3[EVBUFFER_INITIAL_LENGTH - 1] = 'x';
+ evbuffer_add(buf, (unsigned char *)test3, EVBUFFER_INITIAL_LENGTH);
+ evbuffer_validate(buf);
+ p = evbuffer_find(buf, (unsigned char *)"xy", 2);
+ tt_want(p == NULL);
+
+ /* simple test for match at end of allocated buffer */
+ p = evbuffer_find(buf, (unsigned char *)"ax", 2);
+ tt_assert(p != NULL);
+ tt_want(strncmp((char*)p, "ax", 2) == 0);
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+}
+
+static void
+test_evbuffer_ptr_set(void *ptr)
+{
+ struct evbuffer *buf = evbuffer_new();
+ struct evbuffer_ptr pos;
+ struct evbuffer_iovec v[1];
+
+ tt_assert(buf);
+
+ tt_int_op(evbuffer_get_length(buf), ==, 0);
+
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ tt_assert(pos.pos == 0);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 1, EVBUFFER_PTR_ADD) == -1);
+ tt_assert(pos.pos == -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 1, EVBUFFER_PTR_SET) == -1);
+ tt_assert(pos.pos == -1);
+
+ /* create some chains */
+ evbuffer_reserve_space(buf, 5000, v, 1);
+ v[0].iov_len = 5000;
+ memset(v[0].iov_base, 1, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+ evbuffer_validate(buf);
+
+ evbuffer_reserve_space(buf, 4000, v, 1);
+ v[0].iov_len = 4000;
+ memset(v[0].iov_base, 2, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+
+ evbuffer_reserve_space(buf, 3000, v, 1);
+ v[0].iov_len = 3000;
+ memset(v[0].iov_base, 3, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+ evbuffer_validate(buf);
+
+ tt_int_op(evbuffer_get_length(buf), ==, 12000);
+
+ tt_assert(evbuffer_ptr_set(buf, &pos, 13000, EVBUFFER_PTR_SET) == -1);
+ tt_assert(pos.pos == -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ tt_assert(pos.pos == 0);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 13000, EVBUFFER_PTR_ADD) == -1);
+
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ tt_assert(pos.pos == 0);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 10000, EVBUFFER_PTR_ADD) == 0);
+ tt_assert(pos.pos == 10000);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == 0);
+ tt_assert(pos.pos == 11000);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == 0);
+ tt_assert(pos.pos == 12000);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 1000, EVBUFFER_PTR_ADD) == -1);
+ tt_assert(pos.pos == -1);
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+}
+
+static void
+test_evbuffer_search(void *ptr)
+{
+ struct evbuffer *buf = evbuffer_new();
+ struct evbuffer *tmp = evbuffer_new();
+ struct evbuffer_ptr pos, end;
+
+ tt_assert(buf);
+ tt_assert(tmp);
+
+ pos = evbuffer_search(buf, "x", 1, NULL);
+ tt_int_op(pos.pos, ==, -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search(buf, "x", 1, &pos);
+ tt_int_op(pos.pos, ==, -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search_range(buf, "x", 1, &pos, &pos);
+ tt_int_op(pos.pos, ==, -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 0, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search_range(buf, "x", 1, &pos, NULL);
+ tt_int_op(pos.pos, ==, -1);
+
+ /* set up our chains */
+ evbuffer_add_printf(tmp, "hello"); /* 5 chars */
+ evbuffer_add_buffer(buf, tmp);
+ evbuffer_add_printf(tmp, "foo"); /* 3 chars */
+ evbuffer_add_buffer(buf, tmp);
+ evbuffer_add_printf(tmp, "cat"); /* 3 chars */
+ evbuffer_add_buffer(buf, tmp);
+ evbuffer_add_printf(tmp, "attack");
+ evbuffer_add_buffer(buf, tmp);
+
+ pos = evbuffer_search(buf, "attack", 6, NULL);
+ tt_int_op(pos.pos, ==, 11);
+ pos = evbuffer_search(buf, "attacker", 8, NULL);
+ tt_int_op(pos.pos, ==, -1);
+
+ /* test continuing search */
+ pos = evbuffer_search(buf, "oc", 2, NULL);
+ tt_int_op(pos.pos, ==, 7);
+ pos = evbuffer_search(buf, "cat", 3, &pos);
+ tt_int_op(pos.pos, ==, 8);
+ pos = evbuffer_search(buf, "tacking", 7, &pos);
+ tt_int_op(pos.pos, ==, -1);
+
+ evbuffer_ptr_set(buf, &pos, 5, EVBUFFER_PTR_SET);
+ pos = evbuffer_search(buf, "foo", 3, &pos);
+ tt_int_op(pos.pos, ==, 5);
+
+ evbuffer_ptr_set(buf, &pos, 2, EVBUFFER_PTR_ADD);
+ pos = evbuffer_search(buf, "tat", 3, &pos);
+ tt_int_op(pos.pos, ==, 10);
+
+ /* test bounded search. */
+ /* Set "end" to the first t in "attack". */
+ evbuffer_ptr_set(buf, &end, 12, EVBUFFER_PTR_SET);
+ pos = evbuffer_search_range(buf, "foo", 3, NULL, &end);
+ tt_int_op(pos.pos, ==, 5);
+ pos = evbuffer_search_range(buf, "foocata", 7, NULL, &end);
+ tt_int_op(pos.pos, ==, 5);
+ pos = evbuffer_search_range(buf, "foocatat", 8, NULL, &end);
+ tt_int_op(pos.pos, ==, -1);
+ pos = evbuffer_search_range(buf, "ack", 3, NULL, &end);
+ tt_int_op(pos.pos, ==, -1);
+
+ /* Set "end" after the last byte in the buffer. */
+ tt_assert(evbuffer_ptr_set(buf, &end, 17, EVBUFFER_PTR_SET) == 0);
+
+ pos = evbuffer_search_range(buf, "attack", 6, NULL, &end);
+ tt_int_op(pos.pos, ==, 11);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 11, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search_range(buf, "attack", 6, &pos, &end);
+ tt_int_op(pos.pos, ==, 11);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 17, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search_range(buf, "attack", 6, &pos, &end);
+ tt_int_op(pos.pos, ==, -1);
+ tt_assert(evbuffer_ptr_set(buf, &pos, 17, EVBUFFER_PTR_SET) == 0);
+ pos = evbuffer_search_range(buf, "attack", 6, &pos, NULL);
+ tt_int_op(pos.pos, ==, -1);
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+ if (tmp)
+ evbuffer_free(tmp);
+}
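+
+/*
+ * Editor's note: an illustrative sketch, outside the test suite, of the
+ * common "find a delimiter, copy out everything before it" pattern built
+ * on evbuffer_search().  The helper name is a hypothetical example; the
+ * calls are the same public API the checks above cover.
+ */
+#if 0
+static char *
+example_extract_before(struct evbuffer *buf, const char *delim, size_t delimlen)
+{
+	struct evbuffer_ptr where = evbuffer_search(buf, delim, delimlen, NULL);
+	char *out;
+
+	if (where.pos == -1)
+		return NULL;	/* delimiter not (yet) in the buffer */
+	out = malloc((size_t)where.pos + 1);
+	if (!out)
+		return NULL;
+	evbuffer_remove(buf, out, (size_t)where.pos); /* copy and drain the prefix */
+	out[where.pos] = '\0';
+	evbuffer_drain(buf, delimlen);	/* discard the delimiter itself */
+	return out;
+}
+#endif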
+
+static void
+log_change_callback(struct evbuffer *buffer,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+
+ size_t old_len = cbinfo->orig_size;
+ size_t new_len = old_len + cbinfo->n_added - cbinfo->n_deleted;
+ struct evbuffer *out = arg;
+ evbuffer_add_printf(out, "%lu->%lu; ", (unsigned long)old_len,
+ (unsigned long)new_len);
+}
+static void
+self_draining_callback(struct evbuffer *evbuffer, size_t old_len,
+ size_t new_len, void *arg)
+{
+ if (new_len > old_len)
+ evbuffer_drain(evbuffer, new_len);
+}
+
+static void
+test_evbuffer_callbacks(void *ptr)
+{
+ struct evbuffer *buf = evbuffer_new();
+ struct evbuffer *buf_out1 = evbuffer_new();
+ struct evbuffer *buf_out2 = evbuffer_new();
+ struct evbuffer_cb_entry *cb1, *cb2;
+
+ tt_assert(buf);
+ tt_assert(buf_out1);
+ tt_assert(buf_out2);
+
+ cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1);
+ cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2);
+
+ /* Let's run through adding and deleting some stuff from the buffer
+ * and turning the callbacks on and off and removing them. The callback
+ * adds a summary of length changes to buf_out1/buf_out2 when called. */
+ /* size: 0-> 36. */
+ evbuffer_add_printf(buf, "The %d magic words are spotty pudding", 2);
+ evbuffer_validate(buf);
+ evbuffer_cb_clear_flags(buf, cb2, EVBUFFER_CB_ENABLED);
+ evbuffer_drain(buf, 10); /*36->26*/
+ evbuffer_validate(buf);
+ evbuffer_prepend(buf, "Hello", 5);/*26->31*/
+ evbuffer_cb_set_flags(buf, cb2, EVBUFFER_CB_ENABLED);
+ evbuffer_add_reference(buf, "Goodbye", 7, NULL, NULL); /*31->38*/
+ evbuffer_remove_cb_entry(buf, cb1);
+ evbuffer_validate(buf);
+	evbuffer_drain(buf, evbuffer_get_length(buf)); /*38->0*/
+ tt_assert(-1 == evbuffer_remove_cb(buf, log_change_callback, NULL));
+ evbuffer_add(buf, "X", 1); /* 0->1 */
+ tt_assert(!evbuffer_remove_cb(buf, log_change_callback, buf_out2));
+ evbuffer_validate(buf);
+
+ tt_str_op((const char *) evbuffer_pullup(buf_out1, -1), ==,
+ "0->36; 36->26; 26->31; 31->38; ");
+ tt_str_op((const char *) evbuffer_pullup(buf_out2, -1), ==,
+ "0->36; 31->38; 38->0; 0->1; ");
+ evbuffer_drain(buf_out1, evbuffer_get_length(buf_out1));
+ evbuffer_drain(buf_out2, evbuffer_get_length(buf_out2));
+	/* Let's test the obsolete evbuffer_setcb function too. */
+ cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1);
+ tt_assert(cb1 != NULL);
+ cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2);
+ tt_assert(cb2 != NULL);
+ evbuffer_setcb(buf, self_draining_callback, NULL);
+ evbuffer_add_printf(buf, "This should get drained right away.");
+ tt_uint_op(evbuffer_get_length(buf), ==, 0);
+ tt_uint_op(evbuffer_get_length(buf_out1), ==, 0);
+ tt_uint_op(evbuffer_get_length(buf_out2), ==, 0);
+ evbuffer_setcb(buf, NULL, NULL);
+ evbuffer_add_printf(buf, "This will not.");
+ tt_str_op((const char *) evbuffer_pullup(buf, -1), ==, "This will not.");
+ evbuffer_validate(buf);
+ evbuffer_drain(buf, evbuffer_get_length(buf));
+ evbuffer_validate(buf);
+#if 0
+ /* Now let's try a suspended callback. */
+ cb1 = evbuffer_add_cb(buf, log_change_callback, buf_out1);
+ cb2 = evbuffer_add_cb(buf, log_change_callback, buf_out2);
+ evbuffer_cb_suspend(buf,cb2);
+ evbuffer_prepend(buf,"Hello world",11); /*0->11*/
+ evbuffer_validate(buf);
+ evbuffer_cb_suspend(buf,cb1);
+ evbuffer_add(buf,"more",4); /* 11->15 */
+ evbuffer_cb_unsuspend(buf,cb2);
+ evbuffer_drain(buf, 4); /* 15->11 */
+ evbuffer_cb_unsuspend(buf,cb1);
+ evbuffer_drain(buf, evbuffer_get_length(buf)); /* 11->0 */
+
+ tt_str_op(evbuffer_pullup(buf_out1, -1), ==,
+ "0->11; 11->11; 11->0; ");
+ tt_str_op(evbuffer_pullup(buf_out2, -1), ==,
+ "0->15; 15->11; 11->0; ");
+#endif
+
+ end:
+ if (buf)
+ evbuffer_free(buf);
+ if (buf_out1)
+ evbuffer_free(buf_out1);
+ if (buf_out2)
+ evbuffer_free(buf_out2);
+}
+
+static int ref_done_cb_called_count = 0;
+static void *ref_done_cb_called_with = NULL;
+static const void *ref_done_cb_called_with_data = NULL;
+static size_t ref_done_cb_called_with_len = 0;
+static void ref_done_cb(const void *data, size_t len, void *info)
+{
+ ++ref_done_cb_called_count;
+ ref_done_cb_called_with = info;
+ ref_done_cb_called_with_data = data;
+ ref_done_cb_called_with_len = len;
+}
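+
+/*
+ * Editor's note: ref_done_cb() above only records its arguments for the
+ * assertions that follow.  Outside a test, the usual job of the cleanup
+ * callback is to release the referenced storage once the evbuffer is done
+ * with it; a minimal hedged sketch of that idiom (hypothetical helper
+ * names, public libevent API only):
+ */
+#if 0
+static void
+example_free_cb(const void *data, size_t datalen, void *extra)
+{
+	free(extra);	/* 'extra' is the heap block handed to the buffer */
+}
+static int
+example_add_owned_block(struct evbuffer *buf, void *block, size_t len)
+{
+	/* The buffer borrows 'block' without copying it; example_free_cb()
+	 * runs once the last chain referencing it is drained or freed. */
+	return evbuffer_add_reference(buf, block, len, example_free_cb, block);
+}
+#endif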
+
+static void
+test_evbuffer_add_reference(void *ptr)
+{
+ const char chunk1[] = "If you have found the answer to such a problem";
+ const char chunk2[] = "you ought to write it up for publication";
+ /* -- Knuth's "Notes on the Exercises" from TAOCP */
+ char tmp[16];
+ size_t len1 = strlen(chunk1), len2=strlen(chunk2);
+
+ struct evbuffer *buf1 = NULL, *buf2 = NULL;
+
+ buf1 = evbuffer_new();
+ tt_assert(buf1);
+
+ evbuffer_add_reference(buf1, chunk1, len1, ref_done_cb, (void*)111);
+ evbuffer_add(buf1, ", ", 2);
+ evbuffer_add_reference(buf1, chunk2, len2, ref_done_cb, (void*)222);
+ tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2);
+
+ /* Make sure we can drain a little from a reference. */
+ tt_int_op(evbuffer_remove(buf1, tmp, 6), ==, 6);
+ tt_int_op(memcmp(tmp, "If you", 6), ==, 0);
+ tt_int_op(evbuffer_remove(buf1, tmp, 5), ==, 5);
+ tt_int_op(memcmp(tmp, " have", 5), ==, 0);
+
+ /* Make sure that prepending does not meddle with immutable data */
+ tt_int_op(evbuffer_prepend(buf1, "I have ", 7), ==, 0);
+ tt_int_op(memcmp(chunk1, "If you", 6), ==, 0);
+ evbuffer_validate(buf1);
+
+ /* Make sure that when the chunk is over, the callback is invoked. */
+ evbuffer_drain(buf1, 7); /* Remove prepended stuff. */
+ evbuffer_drain(buf1, len1-11-1); /* remove all but one byte of chunk1 */
+ tt_int_op(ref_done_cb_called_count, ==, 0);
+ evbuffer_remove(buf1, tmp, 1);
+ tt_int_op(tmp[0], ==, 'm');
+ tt_assert(ref_done_cb_called_with == (void*)111);
+ tt_assert(ref_done_cb_called_with_data == chunk1);
+ tt_assert(ref_done_cb_called_with_len == len1);
+ tt_int_op(ref_done_cb_called_count, ==, 1);
+ evbuffer_validate(buf1);
+
+ /* Drain some of the remaining chunk, then add it to another buffer */
+ evbuffer_drain(buf1, 6); /* Remove the ", you ". */
+ buf2 = evbuffer_new();
+ tt_assert(buf2);
+ tt_int_op(ref_done_cb_called_count, ==, 1);
+ evbuffer_add(buf2, "I ", 2);
+
+ evbuffer_add_buffer(buf2, buf1);
+ tt_int_op(ref_done_cb_called_count, ==, 1);
+ evbuffer_remove(buf2, tmp, 16);
+ tt_int_op(memcmp("I ought to write", tmp, 16), ==, 0);
+ evbuffer_drain(buf2, evbuffer_get_length(buf2));
+ tt_int_op(ref_done_cb_called_count, ==, 2);
+ tt_assert(ref_done_cb_called_with == (void*)222);
+ evbuffer_validate(buf2);
+
+ /* Now add more stuff to buf1 and make sure that it gets removed on
+ * free. */
+ evbuffer_add(buf1, "You shake and shake the ", 24);
+ evbuffer_add_reference(buf1, "ketchup bottle", 14, ref_done_cb,
+ (void*)3333);
+ evbuffer_add(buf1, ". Nothing comes and then a lot'll.", 35);
+ evbuffer_free(buf1);
+ buf1 = NULL;
+ tt_int_op(ref_done_cb_called_count, ==, 3);
+ tt_assert(ref_done_cb_called_with == (void*)3333);
+
+end:
+ if (buf1)
+ evbuffer_free(buf1);
+ if (buf2)
+ evbuffer_free(buf2);
+}
+
+static void
+test_evbuffer_multicast(void *ptr)
+{
+ const char chunk1[] = "If you have found the answer to such a problem";
+ const char chunk2[] = "you ought to write it up for publication";
+ /* -- Knuth's "Notes on the Exercises" from TAOCP */
+ char tmp[16];
+ size_t len1 = strlen(chunk1), len2=strlen(chunk2);
+
+ struct evbuffer *buf1 = NULL, *buf2 = NULL;
+
+ buf1 = evbuffer_new();
+ tt_assert(buf1);
+
+ evbuffer_add(buf1, chunk1, len1);
+ evbuffer_add(buf1, ", ", 2);
+ evbuffer_add(buf1, chunk2, len2);
+ tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2);
+
+ buf2 = evbuffer_new();
+ tt_assert(buf2);
+
+ tt_int_op(evbuffer_add_buffer_reference(buf2, buf1), ==, 0);
+ /* nested references are not allowed */
+ tt_int_op(evbuffer_add_buffer_reference(buf2, buf2), ==, -1);
+ tt_int_op(evbuffer_add_buffer_reference(buf1, buf2), ==, -1);
+
+ /* both buffers contain the same amount of data */
+	tt_int_op(evbuffer_get_length(buf1), ==, evbuffer_get_length(buf2));
+
+ /* Make sure we can drain a little from the first buffer. */
+ tt_int_op(evbuffer_remove(buf1, tmp, 6), ==, 6);
+ tt_int_op(memcmp(tmp, "If you", 6), ==, 0);
+ tt_int_op(evbuffer_remove(buf1, tmp, 5), ==, 5);
+ tt_int_op(memcmp(tmp, " have", 5), ==, 0);
+
+ /* Make sure that prepending does not meddle with immutable data */
+ tt_int_op(evbuffer_prepend(buf1, "I have ", 7), ==, 0);
+ tt_int_op(memcmp(chunk1, "If you", 6), ==, 0);
+ evbuffer_validate(buf1);
+
+ /* Make sure we can drain a little from the second buffer. */
+ tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6);
+ tt_int_op(memcmp(tmp, "If you", 6), ==, 0);
+ tt_int_op(evbuffer_remove(buf2, tmp, 5), ==, 5);
+ tt_int_op(memcmp(tmp, " have", 5), ==, 0);
+
+ /* Make sure that prepending does not meddle with immutable data */
+ tt_int_op(evbuffer_prepend(buf2, "I have ", 7), ==, 0);
+ tt_int_op(memcmp(chunk1, "If you", 6), ==, 0);
+ evbuffer_validate(buf2);
+
+ /* Make sure the data can be read from the second buffer when the first is freed */
+ evbuffer_free(buf1);
+ buf1 = NULL;
+
+ tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6);
+ tt_int_op(memcmp(tmp, "I have", 6), ==, 0);
+
+ tt_int_op(evbuffer_remove(buf2, tmp, 6), ==, 6);
+ tt_int_op(memcmp(tmp, " foun", 6), ==, 0);
+
+end:
+ if (buf1)
+ evbuffer_free(buf1);
+ if (buf2)
+ evbuffer_free(buf2);
+}
+
+static void
+test_evbuffer_multicast_drain(void *ptr)
+{
+ const char chunk1[] = "If you have found the answer to such a problem";
+ const char chunk2[] = "you ought to write it up for publication";
+ /* -- Knuth's "Notes on the Exercises" from TAOCP */
+ size_t len1 = strlen(chunk1), len2=strlen(chunk2);
+
+ struct evbuffer *buf1 = NULL, *buf2 = NULL;
+
+ buf1 = evbuffer_new();
+ tt_assert(buf1);
+
+ evbuffer_add(buf1, chunk1, len1);
+ evbuffer_add(buf1, ", ", 2);
+ evbuffer_add(buf1, chunk2, len2);
+ tt_int_op(evbuffer_get_length(buf1), ==, len1+len2+2);
+
+ buf2 = evbuffer_new();
+ tt_assert(buf2);
+
+ tt_int_op(evbuffer_add_buffer_reference(buf2, buf1), ==, 0);
+ tt_int_op(evbuffer_get_length(buf2), ==, len1+len2+2);
+ tt_int_op(evbuffer_drain(buf1, evbuffer_get_length(buf1)), ==, 0);
+ tt_int_op(evbuffer_get_length(buf2), ==, len1+len2+2);
+ tt_int_op(evbuffer_drain(buf2, evbuffer_get_length(buf2)), ==, 0);
+ evbuffer_validate(buf1);
+ evbuffer_validate(buf2);
+
+end:
+ if (buf1)
+ evbuffer_free(buf1);
+ if (buf2)
+ evbuffer_free(buf2);
+}
+
+/* Some cases that we didn't get in test_evbuffer() above, for more coverage. */
+static void
+test_evbuffer_prepend(void *ptr)
+{
+ struct evbuffer *buf1 = NULL, *buf2 = NULL;
+ char tmp[128];
+ int n;
+
+ buf1 = evbuffer_new();
+ tt_assert(buf1);
+
+ /* Case 0: The evbuffer is entirely empty. */
+ evbuffer_prepend(buf1, "This string has 29 characters", 29);
+ evbuffer_validate(buf1);
+
+ /* Case 1: Prepend goes entirely in new chunk. */
+ evbuffer_prepend(buf1, "Short.", 6);
+ evbuffer_validate(buf1);
+
+ /* Case 2: prepend goes entirely in first chunk. */
+ evbuffer_drain(buf1, 6+11);
+ evbuffer_prepend(buf1, "it", 2);
+ evbuffer_validate(buf1);
+ tt_assert(!memcmp(buf1->first->buffer+buf1->first->misalign,
+ "it has", 6));
+
+ /* Case 3: prepend is split over multiple chunks. */
+ evbuffer_prepend(buf1, "It is no longer true to say ", 28);
+ evbuffer_validate(buf1);
+ n = evbuffer_remove(buf1, tmp, sizeof(tmp)-1);
+ tt_int_op(n, >=, 0);
+ tmp[n]='\0';
+ tt_str_op(tmp,==,"It is no longer true to say it has 29 characters");
+
+ buf2 = evbuffer_new();
+ tt_assert(buf2);
+
+ /* Case 4: prepend a buffer to an empty buffer. */
+ n = 999;
+ evbuffer_add_printf(buf1, "Here is string %d. ", n++);
+ evbuffer_prepend_buffer(buf2, buf1);
+ evbuffer_validate(buf2);
+
+ /* Case 5: prepend a buffer to a nonempty buffer. */
+ evbuffer_add_printf(buf1, "Here is string %d. ", n++);
+ evbuffer_prepend_buffer(buf2, buf1);
+ evbuffer_validate(buf2);
+ evbuffer_validate(buf1);
+ n = evbuffer_remove(buf2, tmp, sizeof(tmp)-1);
+ tt_int_op(n, >=, 0);
+ tmp[n]='\0';
+ tt_str_op(tmp,==,"Here is string 1000. Here is string 999. ");
+
+end:
+ if (buf1)
+ evbuffer_free(buf1);
+ if (buf2)
+ evbuffer_free(buf2);
+
+}
+
+static void
+test_evbuffer_peek_first_gt(void *info)
+{
+ struct evbuffer *buf = NULL, *tmp_buf = NULL;
+ struct evbuffer_ptr ptr;
+ struct evbuffer_iovec v[2];
+
+ buf = evbuffer_new();
+ tmp_buf = evbuffer_new();
+ evbuffer_add_printf(tmp_buf, "Contents of chunk 100\n");
+ evbuffer_add_buffer(buf, tmp_buf);
+ evbuffer_add_printf(tmp_buf, "Contents of chunk 1\n");
+ evbuffer_add_buffer(buf, tmp_buf);
+
+ evbuffer_ptr_set(buf, &ptr, 0, EVBUFFER_PTR_SET);
+
+	/** The only case that matters */
+ tt_int_op(evbuffer_peek(buf, -1, &ptr, NULL, 0), ==, 2);
+ /** Just in case */
+ tt_int_op(evbuffer_peek(buf, -1, &ptr, v, 2), ==, 2);
+
+ evbuffer_ptr_set(buf, &ptr, 20, EVBUFFER_PTR_ADD);
+ tt_int_op(evbuffer_peek(buf, -1, &ptr, NULL, 0), ==, 2);
+ tt_int_op(evbuffer_peek(buf, -1, &ptr, v, 2), ==, 2);
+ tt_int_op(evbuffer_peek(buf, 2, &ptr, NULL, 0), ==, 1);
+ tt_int_op(evbuffer_peek(buf, 2, &ptr, v, 2), ==, 1);
+ tt_int_op(evbuffer_peek(buf, 3, &ptr, NULL, 0), ==, 2);
+ tt_int_op(evbuffer_peek(buf, 3, &ptr, v, 2), ==, 2);
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+ if (tmp_buf)
+ evbuffer_free(tmp_buf);
+}
+
+static void
+test_evbuffer_peek(void *info)
+{
+ struct evbuffer *buf = NULL, *tmp_buf = NULL;
+ int i;
+ struct evbuffer_iovec v[20];
+ struct evbuffer_ptr ptr;
+
+#define tt_iov_eq(v, s) \
+ tt_int_op((v)->iov_len, ==, strlen(s)); \
+ tt_assert(!memcmp((v)->iov_base, (s), strlen(s)))
+
+ /* Let's make a very fragmented buffer. */
+ buf = evbuffer_new();
+ tmp_buf = evbuffer_new();
+ for (i = 0; i < 16; ++i) {
+ evbuffer_add_printf(tmp_buf, "Contents of chunk [%d]\n", i);
+ evbuffer_add_buffer(buf, tmp_buf);
+ }
+
+ /* How many chunks do we need for everything? */
+ i = evbuffer_peek(buf, -1, NULL, NULL, 0);
+ tt_int_op(i, ==, 16);
+
+ /* Simple peek: get everything. */
+ i = evbuffer_peek(buf, -1, NULL, v, 20);
+ tt_int_op(i, ==, 16); /* we used only 16 chunks. */
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[3], "Contents of chunk [3]\n");
+ tt_iov_eq(&v[12], "Contents of chunk [12]\n");
+ tt_iov_eq(&v[15], "Contents of chunk [15]\n");
+
+ /* Just get one chunk worth. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, -1, NULL, v, 1);
+ tt_int_op(i, ==, 1);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_assert(v[1].iov_base == NULL);
+
+ /* Suppose we want at least the first 40 bytes. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 40, NULL, v, 16);
+ tt_int_op(i, ==, 2);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [1]\n");
+ tt_assert(v[2].iov_base == NULL);
+
+ /* How many chunks do we need for 100 bytes? */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 100, NULL, NULL, 0);
+ tt_int_op(i, ==, 5);
+ tt_assert(v[0].iov_base == NULL);
+
+ /* Now we ask for more bytes than we provide chunks for */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 60, NULL, v, 1);
+ tt_int_op(i, ==, 3);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_assert(v[1].iov_base == NULL);
+
+ /* Now we ask for more bytes than the buffer has. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 65536, NULL, v, 20);
+ tt_int_op(i, ==, 16); /* we used only 16 chunks. */
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[3], "Contents of chunk [3]\n");
+ tt_iov_eq(&v[12], "Contents of chunk [12]\n");
+ tt_iov_eq(&v[15], "Contents of chunk [15]\n");
+ tt_assert(v[16].iov_base == NULL);
+
+ /* What happens if we try an empty buffer? */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(tmp_buf, -1, NULL, v, 20);
+ tt_int_op(i, ==, 0);
+ tt_assert(v[0].iov_base == NULL);
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(tmp_buf, 50, NULL, v, 20);
+ tt_int_op(i, ==, 0);
+ tt_assert(v[0].iov_base == NULL);
+
+ /* Okay, now time to have fun with pointers. */
+ memset(v, 0, sizeof(v));
+ evbuffer_ptr_set(buf, &ptr, 30, EVBUFFER_PTR_SET);
+ i = evbuffer_peek(buf, 50, &ptr, v, 20);
+ tt_int_op(i, ==, 3);
+ tt_iov_eq(&v[0], " of chunk [1]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [2]\n");
+ tt_iov_eq(&v[2], "Contents of chunk [3]\n"); /*more than we asked for*/
+
+ /* advance to the start of another chain. */
+ memset(v, 0, sizeof(v));
+ evbuffer_ptr_set(buf, &ptr, 14, EVBUFFER_PTR_ADD);
+ i = evbuffer_peek(buf, 44, &ptr, v, 20);
+ tt_int_op(i, ==, 2);
+ tt_iov_eq(&v[0], "Contents of chunk [2]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [3]\n"); /*more than we asked for*/
+
+ /* peek at the end of the buffer */
+ memset(v, 0, sizeof(v));
+ tt_assert(evbuffer_ptr_set(buf, &ptr, evbuffer_get_length(buf), EVBUFFER_PTR_SET) == 0);
+ i = evbuffer_peek(buf, 44, &ptr, v, 20);
+ tt_int_op(i, ==, 0);
+ tt_assert(v[0].iov_base == NULL);
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+ if (tmp_buf)
+ evbuffer_free(tmp_buf);
+}
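+
+/*
+ * Editor's note: a sketch (not a test) of the zero-copy scan that
+ * evbuffer_peek() enables and that the cases above exercise: the buffer is
+ * inspected chain by chain without draining it, and a return value larger
+ * than the vector length means more chains would have been needed.  The
+ * helper name and the fixed-size vector are illustrative assumptions.
+ */
+#if 0
+static size_t
+example_count_occurrences(struct evbuffer *buf, unsigned char needle)
+{
+	struct evbuffer_iovec vec[8];
+	size_t count = 0;
+	int n, i;
+
+	/* A real caller would size 'vec' from a first
+	 * evbuffer_peek(buf, -1, NULL, NULL, 0) call. */
+	n = evbuffer_peek(buf, -1, NULL, vec, 8);
+	if (n > 8)
+		n = 8;	/* only the first 8 entries were filled in */
+	for (i = 0; i < n; ++i) {
+		const unsigned char *p = vec[i].iov_base;
+		size_t j;
+		for (j = 0; j < vec[i].iov_len; ++j) {
+			if (p[j] == needle)
+				++count;
+		}
+	}
+	return count;
+}
+#endif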
+
+/* Check whether evbuffer freezing works right. This is called twice,
+ once with the argument "start" and once with the argument "end".
+ When we test "start", we freeze the start of an evbuffer and make sure
+ that modifying the start of the buffer doesn't work. When we test
+ "end", we freeze the end of an evbuffer and make sure that modifying
+ the end of the buffer doesn't work.
+ */
+static void
+test_evbuffer_freeze(void *ptr)
+{
+ struct evbuffer *buf = NULL, *tmp_buf=NULL;
+ const char string[] = /* Year's End, Richard Wilbur */
+ "I've known the wind by water banks to shake\n"
+ "The late leaves down, which frozen where they fell\n"
+ "And held in ice as dancers in a spell\n"
+ "Fluttered all winter long into a lake...";
+ const int start = !strcmp(ptr, "start");
+ char *cp;
+ char charbuf[128];
+ int r;
+ size_t orig_length;
+ struct evbuffer_iovec v[1];
+
+ if (!start)
+ tt_str_op(ptr, ==, "end");
+
+ buf = evbuffer_new();
+ tmp_buf = evbuffer_new();
+ tt_assert(tmp_buf);
+
+ evbuffer_add(buf, string, strlen(string));
+ evbuffer_freeze(buf, start); /* Freeze the start or the end.*/
+
+#define FREEZE_EQ(a, startcase, endcase) \
+ do { \
+ if (start) { \
+ tt_int_op((a), ==, (startcase)); \
+ } else { \
+ tt_int_op((a), ==, (endcase)); \
+ } \
+ } while (0)
+
+
+ orig_length = evbuffer_get_length(buf);
+
+ /* These functions all manipulate the end of buf. */
+ r = evbuffer_add(buf, "abc", 0);
+ FREEZE_EQ(r, 0, -1);
+ r = evbuffer_reserve_space(buf, 10, v, 1);
+ FREEZE_EQ(r, 1, -1);
+ if (r == 1) {
+ memset(v[0].iov_base, 'X', 10);
+ v[0].iov_len = 10;
+ }
+ r = evbuffer_commit_space(buf, v, 1);
+ FREEZE_EQ(r, 0, -1);
+ r = evbuffer_add_reference(buf, string, 5, NULL, NULL);
+ FREEZE_EQ(r, 0, -1);
+ r = evbuffer_add_printf(buf, "Hello %s", "world");
+ FREEZE_EQ(r, 11, -1);
+ /* TODO: test add_buffer, add_file, read */
+
+ if (!start)
+ tt_int_op(orig_length, ==, evbuffer_get_length(buf));
+
+ orig_length = evbuffer_get_length(buf);
+
+ /* These functions all manipulate the start of buf. */
+ r = evbuffer_remove(buf, charbuf, 1);
+ FREEZE_EQ(r, -1, 1);
+ r = evbuffer_drain(buf, 3);
+ FREEZE_EQ(r, -1, 0);
+ r = evbuffer_prepend(buf, "dummy", 5);
+ FREEZE_EQ(r, -1, 0);
+ cp = evbuffer_readln(buf, NULL, EVBUFFER_EOL_LF);
+ FREEZE_EQ(cp==NULL, 1, 0);
+ if (cp)
+ free(cp);
+ /* TODO: Test remove_buffer, add_buffer, write, prepend_buffer */
+
+ if (start)
+ tt_int_op(orig_length, ==, evbuffer_get_length(buf));
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+
+ if (tmp_buf)
+ evbuffer_free(tmp_buf);
+}
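+
+/*
+ * Editor's note: an illustrative sketch of the freeze API checked above,
+ * outside the test harness.  Freezing the front forbids operations that
+ * modify the front (drain, remove, prepend); freezing the end forbids
+ * appends.  The wrapper below is a hypothetical example of lending a
+ * buffer to code that is only allowed to append to it.
+ */
+#if 0
+static int
+example_append_only(struct evbuffer *buf, void (*producer)(struct evbuffer *))
+{
+	if (evbuffer_freeze(buf, 1) < 0)	/* 1 == freeze the front */
+		return -1;
+	producer(buf);				/* may add; must not drain */
+	return evbuffer_unfreeze(buf, 1);
+}
+#endif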
+
+static void
+test_evbuffer_add_iovec(void * ptr)
+{
+ struct evbuffer * buf = NULL;
+ struct evbuffer_iovec vec[4];
+ const char * data[] = {
+ "Guilt resembles a sword with two edges.",
+ "On the one hand, it cuts for Justice, imposing practical morality upon those who fear it.",
+ "Conscience does not always adhere to rational judgment.",
+ "Guilt is always a self-imposed burden, but it is not always rightly imposed."
+		/* -- R.A. Salvatore, _Sojourn_ */
+ };
+ size_t expected_length = 0;
+ size_t returned_length = 0;
+ int i;
+
+ buf = evbuffer_new();
+
+ tt_assert(buf);
+
+ for (i = 0; i < 4; i++) {
+ vec[i].iov_len = strlen(data[i]);
+ vec[i].iov_base = (char*) data[i];
+ expected_length += vec[i].iov_len;
+ }
+
+ returned_length = evbuffer_add_iovec(buf, vec, 4);
+
+ tt_int_op(returned_length, ==, evbuffer_get_length(buf));
+ tt_int_op(evbuffer_get_length(buf), ==, expected_length);
+
+ for (i = 0; i < 4; i++) {
+ char charbuf[1024];
+
+ memset(charbuf, 0, 1024);
+ evbuffer_remove(buf, charbuf, strlen(data[i]));
+ tt_assert(strcmp(charbuf, data[i]) == 0);
+ }
+
+ tt_assert(evbuffer_get_length(buf) == 0);
+end:
+ if (buf) {
+ evbuffer_free(buf);
+ }
+}
+
+static void
+test_evbuffer_copyout(void *dummy)
+{
+ const char string[] =
+ "Still they skirmish to and fro, men my messmates on the snow "
+ "When we headed off the aurochs turn for turn; "
+ "When the rich Allobrogenses never kept amanuenses, "
+ "And our only plots were piled in lakes at Berne.";
+ /* -- Kipling, "In The Neolithic Age" */
+ char tmp[1024];
+ struct evbuffer_ptr ptr;
+ struct evbuffer *buf;
+
+ (void)dummy;
+
+ buf = evbuffer_new();
+ tt_assert(buf);
+
+ tt_int_op(strlen(string), ==, 206);
+
+ /* Ensure separate chains */
+ evbuffer_add_reference(buf, string, 80, no_cleanup, NULL);
+ evbuffer_add_reference(buf, string+80, 80, no_cleanup, NULL);
+ evbuffer_add(buf, string+160, strlen(string)-160);
+
+ tt_int_op(206, ==, evbuffer_get_length(buf));
+
+ /* First, let's test plain old copyout. */
+
+ /* Copy a little from the beginning. */
+ tt_int_op(10, ==, evbuffer_copyout(buf, tmp, 10));
+ tt_int_op(0, ==, memcmp(tmp, "Still they", 10));
+
+ /* Now copy more than a little from the beginning */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(100, ==, evbuffer_copyout(buf, tmp, 100));
+ tt_int_op(0, ==, memcmp(tmp, string, 100));
+
+ /* Copy too much; ensure truncation. */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(206, ==, evbuffer_copyout(buf, tmp, 230));
+ tt_int_op(0, ==, memcmp(tmp, string, 206));
+
+ /* That was supposed to be nondestructive, btw */
+ tt_int_op(206, ==, evbuffer_get_length(buf));
+
+ /* Now it's time to test copyout_from! First, let's start in the
+ * first chain. */
+ evbuffer_ptr_set(buf, &ptr, 15, EVBUFFER_PTR_SET);
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(10, ==, evbuffer_copyout_from(buf, &ptr, tmp, 10));
+ tt_int_op(0, ==, memcmp(tmp, "mish to an", 10));
+
+ /* Right up to the end of the first chain */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(65, ==, evbuffer_copyout_from(buf, &ptr, tmp, 65));
+ tt_int_op(0, ==, memcmp(tmp, string+15, 65));
+
+ /* Span into the second chain */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(90, ==, evbuffer_copyout_from(buf, &ptr, tmp, 90));
+ tt_int_op(0, ==, memcmp(tmp, string+15, 90));
+
+ /* Span into the third chain */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(160, ==, evbuffer_copyout_from(buf, &ptr, tmp, 160));
+ tt_int_op(0, ==, memcmp(tmp, string+15, 160));
+
+ /* Overrun */
+ memset(tmp, 0, sizeof(tmp));
+ tt_int_op(206-15, ==, evbuffer_copyout_from(buf, &ptr, tmp, 999));
+ tt_int_op(0, ==, memcmp(tmp, string+15, 206-15));
+
+ /* That was supposed to be nondestructive, too */
+ tt_int_op(206, ==, evbuffer_get_length(buf));
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+}
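+
+/*
+ * Editor's note: a minimal sketch of the non-destructive read the test
+ * above relies on: peeking at a fixed-size header with evbuffer_copyout()
+ * leaves the bytes in place for a later parser, and a short return simply
+ * means "not enough data buffered yet".  The helper name is hypothetical.
+ */
+#if 0
+static int
+example_peek_header(struct evbuffer *buf, char *hdr, size_t hdrlen)
+{
+	ev_ssize_t n = evbuffer_copyout(buf, hdr, hdrlen);
+	return (n >= 0 && (size_t)n == hdrlen) ? 0 : -1;
+}
+#endif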
+
+static void *
+setup_passthrough(const struct testcase_t *testcase)
+{
+ return testcase->setup_data;
+}
+static int
+cleanup_passthrough(const struct testcase_t *testcase, void *ptr)
+{
+ (void) ptr;
+ return 1;
+}
+
+static const struct testcase_setup_t nil_setup = {
+ setup_passthrough,
+ cleanup_passthrough
+};
+
+struct testcase_t evbuffer_testcases[] = {
+ { "evbuffer", test_evbuffer, 0, NULL, NULL },
+ { "remove_buffer_with_empty", test_evbuffer_remove_buffer_with_empty, 0, NULL, NULL },
+ { "reserve2", test_evbuffer_reserve2, 0, NULL, NULL },
+ { "reserve_many", test_evbuffer_reserve_many, 0, NULL, NULL },
+ { "reserve_many2", test_evbuffer_reserve_many, 0, &nil_setup, (void*)"add" },
+ { "reserve_many3", test_evbuffer_reserve_many, 0, &nil_setup, (void*)"fill" },
+ { "expand", test_evbuffer_expand, 0, NULL, NULL },
+ { "reference", test_evbuffer_reference, 0, NULL, NULL },
+ { "iterative", test_evbuffer_iterative, 0, NULL, NULL },
+ { "readln", test_evbuffer_readln, TT_NO_LOGS, &basic_setup, NULL },
+ { "search_eol", test_evbuffer_search_eol, 0, NULL, NULL },
+ { "find", test_evbuffer_find, 0, NULL, NULL },
+ { "ptr_set", test_evbuffer_ptr_set, 0, NULL, NULL },
+ { "search", test_evbuffer_search, 0, NULL, NULL },
+ { "callbacks", test_evbuffer_callbacks, 0, NULL, NULL },
+ { "add_reference", test_evbuffer_add_reference, 0, NULL, NULL },
+ { "multicast", test_evbuffer_multicast, 0, NULL, NULL },
+ { "multicast_drain", test_evbuffer_multicast_drain, 0, NULL, NULL },
+ { "prepend", test_evbuffer_prepend, TT_FORK, NULL, NULL },
+ { "peek", test_evbuffer_peek, 0, NULL, NULL },
+ { "peek_first_gt", test_evbuffer_peek_first_gt, 0, NULL, NULL },
+ { "freeze_start", test_evbuffer_freeze, 0, &nil_setup, (void*)"start" },
+ { "freeze_end", test_evbuffer_freeze, 0, &nil_setup, (void*)"end" },
+ { "add_iovec", test_evbuffer_add_iovec, 0, NULL, NULL},
+ { "copyout", test_evbuffer_copyout, 0, NULL, NULL},
+ { "file_segment_add_cleanup_cb", test_evbuffer_file_segment_add_cleanup_cb, 0, NULL, NULL },
+
+#define ADDFILE_TEST(name, parameters) \
+ { name, test_evbuffer_add_file, TT_FORK|TT_NEED_BASE, \
+ &basic_setup, (void*)(parameters) }
+
+#define ADDFILE_TEST_GROUP(name, parameters) \
+ ADDFILE_TEST(name "_sendfile", "sendfile " parameters), \
+ ADDFILE_TEST(name "_mmap", "mmap " parameters), \
+ ADDFILE_TEST(name "_linear", "linear " parameters)
+
+ ADDFILE_TEST_GROUP("add_file", ""),
+ ADDFILE_TEST("add_file_nosegment", "default nosegment"),
+
+ ADDFILE_TEST_GROUP("add_big_file", "bigfile"),
+ ADDFILE_TEST("add_big_file_nosegment", "default nosegment bigfile"),
+
+ ADDFILE_TEST_GROUP("add_file_offset", "bigfile map_offset"),
+ ADDFILE_TEST("add_file_offset_nosegment",
+ "default nosegment bigfile map_offset"),
+
+ ADDFILE_TEST_GROUP("add_file_offset2", "bigfile offset_in_segment"),
+
+ ADDFILE_TEST_GROUP("add_file_offset3",
+ "bigfile offset_in_segment map_offset"),
+
+ END_OF_TESTCASES
+};
diff --git a/libs/libevent/docs/test/regress_bufferevent.c b/libs/libevent/docs/test/regress_bufferevent.c
new file mode 100644
index 0000000000..68e6876405
--- /dev/null
+++ b/libs/libevent/docs/test/regress_bufferevent.c
@@ -0,0 +1,1284 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+/* The old tests here need assertions to work. */
+#undef NDEBUG
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#endif
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#ifdef EVENT__HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+#include "event2/event-config.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/event_compat.h"
+#include "event2/tag.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/listener.h"
+#include "event2/util.h"
+
+#include "bufferevent-internal.h"
+#include "evthread-internal.h"
+#include "util-internal.h"
+#ifdef _WIN32
+#include "iocp-internal.h"
+#endif
+
+#include "regress.h"
+#include "regress_testutils.h"
+
+/*
+ * simple bufferevent test
+ */
+
+static void
+readcb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bev->input) == 8333) {
+ struct evbuffer *evbuf = evbuffer_new();
+ assert(evbuf != NULL);
+
+ /* gratuitous test of bufferevent_read_buffer */
+ bufferevent_read_buffer(bev, evbuf);
+
+ bufferevent_disable(bev, EV_READ);
+
+ if (evbuffer_get_length(evbuf) == 8333) {
+ test_ok++;
+ }
+
+ evbuffer_free(evbuf);
+ }
+}
+
+static void
+writecb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bev->output) == 0) {
+ test_ok++;
+ }
+}
+
+static void
+errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ test_ok = -2;
+}
+
+static void
+test_bufferevent_impl(int use_pair, int flush)
+{
+ struct bufferevent *bev1 = NULL, *bev2 = NULL;
+ char buffer[8333];
+ int i;
+ int expected = 2;
+
+ if (use_pair) {
+ struct bufferevent *pair[2];
+ tt_assert(0 == bufferevent_pair_new(NULL, 0, pair));
+ bev1 = pair[0];
+ bev2 = pair[1];
+ bufferevent_setcb(bev1, readcb, writecb, errorcb, bev1);
+ bufferevent_setcb(bev2, readcb, writecb, errorcb, NULL);
+ tt_int_op(bufferevent_getfd(bev1), ==, -1);
+ tt_ptr_op(bufferevent_get_underlying(bev1), ==, NULL);
+ tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, bev2);
+ tt_ptr_op(bufferevent_pair_get_partner(bev2), ==, bev1);
+ } else {
+ bev1 = bufferevent_new(pair[0], readcb, writecb, errorcb, NULL);
+ bev2 = bufferevent_new(pair[1], readcb, writecb, errorcb, NULL);
+ tt_int_op(bufferevent_getfd(bev1), ==, pair[0]);
+ tt_ptr_op(bufferevent_get_underlying(bev1), ==, NULL);
+ tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, NULL);
+ tt_ptr_op(bufferevent_pair_get_partner(bev2), ==, NULL);
+ }
+
+ {
+ /* Test getcb. */
+ bufferevent_data_cb r, w;
+ bufferevent_event_cb e;
+ void *a;
+ bufferevent_getcb(bev1, &r, &w, &e, &a);
+ tt_ptr_op(r, ==, readcb);
+ tt_ptr_op(w, ==, writecb);
+ tt_ptr_op(e, ==, errorcb);
+ tt_ptr_op(a, ==, use_pair ? bev1 : NULL);
+ }
+
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+
+ tt_int_op(bufferevent_get_enabled(bev1), ==, EV_WRITE);
+ tt_int_op(bufferevent_get_enabled(bev2), ==, EV_WRITE|EV_READ);
+
+ for (i = 0; i < (int)sizeof(buffer); i++)
+ buffer[i] = i;
+
+ bufferevent_write(bev1, buffer, sizeof(buffer));
+ if (flush >= 0) {
+ tt_int_op(bufferevent_flush(bev1, EV_WRITE, flush), >=, 0);
+ }
+
+ event_dispatch();
+
+ bufferevent_free(bev2);
+ tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, NULL);
+ bufferevent_free(bev1);
+
+	/** Only the bufferevent pair invokes errorcb for BEV_FINISHED */
+ if (use_pair && flush == BEV_FINISHED) {
+ expected = -1;
+ }
+ if (test_ok != expected)
+ test_ok = 0;
+end:
+ ;
+}
+
+static void test_bufferevent(void) { test_bufferevent_impl(0, -1); }
+static void test_bufferevent_pair(void) { test_bufferevent_impl(1, -1); }
+
+static void test_bufferevent_flush_normal(void) { test_bufferevent_impl(0, BEV_NORMAL); }
+static void test_bufferevent_flush_flush(void) { test_bufferevent_impl(0, BEV_FLUSH); }
+static void test_bufferevent_flush_finished(void) { test_bufferevent_impl(0, BEV_FINISHED); }
+
+static void test_bufferevent_pair_flush_normal(void) { test_bufferevent_impl(1, BEV_NORMAL); }
+static void test_bufferevent_pair_flush_flush(void) { test_bufferevent_impl(1, BEV_FLUSH); }
+static void test_bufferevent_pair_flush_finished(void) { test_bufferevent_impl(1, BEV_FINISHED); }
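+
+/*
+ * Editor's note: a hedged sketch (not a test) of what the flush modes in
+ * the wrappers above mean for the write side of a bufferevent pair:
+ * BEV_NORMAL and BEV_FLUSH push any pending data, while BEV_FINISHED also
+ * tells the partner that no more data will follow, which is why only the
+ * pair/BEV_FINISHED cases above expect the error callback to run.  The
+ * helper name is hypothetical.
+ */
+#if 0
+static int
+example_finish_write_side(struct bufferevent *bev)
+{
+	/* Flush whatever is queued and mark the stream as finished. */
+	return bufferevent_flush(bev, EV_WRITE, BEV_FINISHED);
+}
+#endif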
+
+#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
+/**
+ * Trace lock/unlock/alloc/free for locks.
+ * (Heavier-weight than the evthread_debug* lock checks.)
+ */
+typedef struct
+{
+ void *lock;
+ enum {
+ ALLOC, FREE,
+ } status;
+	size_t locked; /** allows recursive locking */
+} lock_wrapper;
+struct lock_unlock_base
+{
+ /* Original callbacks */
+ struct evthread_lock_callbacks cbs;
+ /* Map of locks */
+ lock_wrapper *locks;
+ size_t nr_locks;
+} lu_base = {
+ .locks = NULL,
+};
+
+static lock_wrapper *lu_find(void *lock_)
+{
+ size_t i;
+ for (i = 0; i < lu_base.nr_locks; ++i) {
+ lock_wrapper *lock = &lu_base.locks[i];
+ if (lock->lock == lock_)
+ return lock;
+ }
+ return NULL;
+}
+
+static void *trace_lock_alloc(unsigned locktype)
+{
+ void *lock;
+ ++lu_base.nr_locks;
+ lu_base.locks = realloc(lu_base.locks,
+ sizeof(lock_wrapper) * lu_base.nr_locks);
+ lock = lu_base.cbs.alloc(locktype);
+ lu_base.locks[lu_base.nr_locks - 1] = (lock_wrapper){ lock, ALLOC, 0 };
+ return lock;
+}
+static void trace_lock_free(void *lock_, unsigned locktype)
+{
+ lock_wrapper *lock = lu_find(lock_);
+ if (!lock || lock->status == FREE || lock->locked) {
+ TT_FAIL(("lock: free error"));
+ } else {
+ lock->status = FREE;
+ lu_base.cbs.free(lock_, locktype);
+ }
+}
+static int trace_lock_lock(unsigned mode, void *lock_)
+{
+ lock_wrapper *lock = lu_find(lock_);
+ if (!lock || lock->status == FREE) {
+ TT_FAIL(("lock: lock error"));
+ return -1;
+ } else {
+ ++lock->locked;
+ return lu_base.cbs.lock(mode, lock_);
+ }
+}
+static int trace_lock_unlock(unsigned mode, void *lock_)
+{
+ lock_wrapper *lock = lu_find(lock_);
+ if (!lock || lock->status == FREE || !lock->locked) {
+ TT_FAIL(("lock: unlock error"));
+ return -1;
+ } else {
+ --lock->locked;
+ return lu_base.cbs.unlock(mode, lock_);
+ }
+}
+static void lock_unlock_free_thread_cbs(void)
+{
+ event_base_free(NULL);
+
+ if (libevent_tests_running_in_debug_mode)
+ libevent_global_shutdown();
+
+ /** drop immutable flag */
+ evthread_set_lock_callbacks(NULL);
+	/** avoid calling event_global_setup_locks_() for the new cbs */
+ libevent_global_shutdown();
+ /** drop immutable flag for non-debug ops (since called after shutdown) */
+ evthread_set_lock_callbacks(NULL);
+}
+
+static int use_lock_unlock_profiler(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ trace_lock_alloc,
+ trace_lock_free,
+ trace_lock_lock,
+ trace_lock_unlock,
+ };
+ memcpy(&lu_base.cbs, evthread_get_lock_callbacks(),
+ sizeof(lu_base.cbs));
+ {
+ lock_unlock_free_thread_cbs();
+
+ evthread_set_lock_callbacks(&cbs);
+ /** re-create debug locks correctly */
+ evthread_enable_lock_debugging();
+
+ event_init();
+ }
+ return 0;
+}
+static void free_lock_unlock_profiler(struct basic_test_data *data)
+{
+ /** fix "held_by" for kqueue */
+ evthread_set_lock_callbacks(NULL);
+
+ lock_unlock_free_thread_cbs();
+ free(lu_base.locks);
+ data->base = NULL;
+}
+
+static void test_bufferevent_pair_release_lock(void *arg)
+{
+ struct basic_test_data *data = arg;
+ use_lock_unlock_profiler();
+ {
+ struct bufferevent *pair[2];
+ if (!bufferevent_pair_new(NULL, BEV_OPT_THREADSAFE, pair)) {
+ bufferevent_free(pair[0]);
+ bufferevent_free(pair[1]);
+ } else
+ tt_abort_perror("bufferevent_pair_new");
+ }
+ free_lock_unlock_profiler(data);
+end:
+ ;
+}
+#endif
+
+/*
+ * test watermarks and bufferevent
+ */
+
+static void
+wm_readcb(struct bufferevent *bev, void *arg)
+{
+ struct evbuffer *evbuf = evbuffer_new();
+ int len = (int)evbuffer_get_length(bev->input);
+ static int nread;
+
+ assert(len >= 10 && len <= 20);
+
+ assert(evbuf != NULL);
+
+ /* gratuitous test of bufferevent_read_buffer */
+ bufferevent_read_buffer(bev, evbuf);
+
+ nread += len;
+ if (nread == 65000) {
+ bufferevent_disable(bev, EV_READ);
+ test_ok++;
+ }
+
+ evbuffer_free(evbuf);
+}
+
+static void
+wm_writecb(struct bufferevent *bev, void *arg)
+{
+ assert(evbuffer_get_length(bev->output) <= 100);
+ if (evbuffer_get_length(bev->output) == 0) {
+ evbuffer_drain(bev->output, evbuffer_get_length(bev->output));
+ test_ok++;
+ }
+}
+
+static void
+wm_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ test_ok = -2;
+}
+
+static void
+test_bufferevent_watermarks_impl(int use_pair)
+{
+ struct bufferevent *bev1 = NULL, *bev2 = NULL;
+ char buffer[65000];
+ size_t low, high;
+ int i;
+ test_ok = 0;
+
+ if (use_pair) {
+ struct bufferevent *pair[2];
+ tt_assert(0 == bufferevent_pair_new(NULL, 0, pair));
+ bev1 = pair[0];
+ bev2 = pair[1];
+ bufferevent_setcb(bev1, NULL, wm_writecb, errorcb, NULL);
+ bufferevent_setcb(bev2, wm_readcb, NULL, errorcb, NULL);
+ } else {
+ bev1 = bufferevent_new(pair[0], NULL, wm_writecb, wm_errorcb, NULL);
+ bev2 = bufferevent_new(pair[1], wm_readcb, NULL, wm_errorcb, NULL);
+ }
+ tt_assert(bev1);
+ tt_assert(bev2);
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+
+ /* By default, low watermarks are set to 0 */
+ bufferevent_getwatermark(bev1, EV_READ, &low, NULL);
+ tt_int_op(low, ==, 0);
+ bufferevent_getwatermark(bev2, EV_WRITE, &low, NULL);
+ tt_int_op(low, ==, 0);
+
+ for (i = 0; i < (int)sizeof(buffer); i++)
+ buffer[i] = (char)i;
+
+ /* limit the reading on the receiving bufferevent */
+ bufferevent_setwatermark(bev2, EV_READ, 10, 20);
+
+ bufferevent_getwatermark(bev2, EV_READ, &low, &high);
+ tt_int_op(low, ==, 10);
+ tt_int_op(high, ==, 20);
+
+ /* Tell the sending bufferevent not to notify us till it's down to
+ 100 bytes. */
+ bufferevent_setwatermark(bev1, EV_WRITE, 100, 2000);
+
+ bufferevent_getwatermark(bev1, EV_WRITE, &low, &high);
+ tt_int_op(low, ==, 100);
+ tt_int_op(high, ==, 2000);
+
+ {
+ int r = bufferevent_getwatermark(bev1, EV_WRITE | EV_READ, &low, &high);
+ tt_int_op(r, !=, 0);
+ }
+
+ bufferevent_write(bev1, buffer, sizeof(buffer));
+
+ event_dispatch();
+
+ tt_int_op(test_ok, ==, 2);
+
+ /* The write callback drained all the data from outbuf, so we
+ * should have removed the write event... */
+ tt_assert(!event_pending(&bev2->ev_write, EV_WRITE, NULL));
+
+end:
+ if (bev1)
+ bufferevent_free(bev1);
+ if (bev2)
+ bufferevent_free(bev2);
+}
+
+static void
+test_bufferevent_watermarks(void)
+{
+ test_bufferevent_watermarks_impl(0);
+}
+
+static void
+test_bufferevent_pair_watermarks(void)
+{
+ test_bufferevent_watermarks_impl(1);
+}
+
+/*
+ * Test bufferevent filters
+ */
+
+/* strip the 'x' that precedes each byte */
+
+static enum bufferevent_filter_result
+bufferevent_input_filter(struct evbuffer *src, struct evbuffer *dst,
+ ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
+{
+ const unsigned char *buffer;
+ unsigned i;
+
+ buffer = evbuffer_pullup(src, evbuffer_get_length(src));
+ for (i = 0; i < evbuffer_get_length(src); i += 2) {
+ assert(buffer[i] == 'x');
+ evbuffer_add(dst, buffer + i + 1, 1);
+
+ if (i + 2 > evbuffer_get_length(src))
+ break;
+ }
+
+ evbuffer_drain(src, i);
+ return (BEV_OK);
+}
+
+/* add an 'x' before each byte */
+
+static enum bufferevent_filter_result
+bufferevent_output_filter(struct evbuffer *src, struct evbuffer *dst,
+ ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
+{
+ const unsigned char *buffer;
+ unsigned i;
+
+ buffer = evbuffer_pullup(src, evbuffer_get_length(src));
+ for (i = 0; i < evbuffer_get_length(src); ++i) {
+ evbuffer_add(dst, "x", 1);
+ evbuffer_add(dst, buffer + i, 1);
+ }
+
+ evbuffer_drain(src, evbuffer_get_length(src));
+ return (BEV_OK);
+}
+
+static void
+test_bufferevent_filters_impl(int use_pair)
+{
+ struct bufferevent *bev1 = NULL, *bev2 = NULL;
+ struct bufferevent *bev1_base = NULL, *bev2_base = NULL;
+ char buffer[8333];
+ int i;
+
+ test_ok = 0;
+
+ if (use_pair) {
+ struct bufferevent *pair[2];
+ tt_assert(0 == bufferevent_pair_new(NULL, 0, pair));
+ bev1 = pair[0];
+ bev2 = pair[1];
+ } else {
+ bev1 = bufferevent_socket_new(NULL, pair[0], 0);
+ bev2 = bufferevent_socket_new(NULL, pair[1], 0);
+ }
+ bev1_base = bev1;
+ bev2_base = bev2;
+
+ for (i = 0; i < (int)sizeof(buffer); i++)
+ buffer[i] = i;
+
+ bev1 = bufferevent_filter_new(bev1, NULL, bufferevent_output_filter,
+ BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+
+ bev2 = bufferevent_filter_new(bev2, bufferevent_input_filter,
+ NULL, BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+ bufferevent_setcb(bev1, NULL, writecb, errorcb, NULL);
+ bufferevent_setcb(bev2, readcb, NULL, errorcb, NULL);
+
+ tt_ptr_op(bufferevent_get_underlying(bev1), ==, bev1_base);
+ tt_ptr_op(bufferevent_get_underlying(bev2), ==, bev2_base);
+ tt_int_op(bufferevent_getfd(bev1), ==, -1);
+ tt_int_op(bufferevent_getfd(bev2), ==, -1);
+
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+	/* start pushing data through the filter stack */
+ bufferevent_write(bev1, buffer, sizeof(buffer));
+
+ event_dispatch();
+
+ if (test_ok != 2)
+ test_ok = 0;
+
+end:
+ if (bev1)
+ bufferevent_free(bev1);
+ if (bev2)
+ bufferevent_free(bev2);
+
+}
+
+static void
+test_bufferevent_filters(void)
+{
+ test_bufferevent_filters_impl(0);
+}
+
+static void
+test_bufferevent_pair_filters(void)
+{
+ test_bufferevent_filters_impl(1);
+}
+
+
+static void
+sender_writecb(struct bufferevent *bev, void *ctx)
+{
+ if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+ bufferevent_disable(bev,EV_READ|EV_WRITE);
+ TT_BLATHER(("Flushed %d: freeing it.", (int)bufferevent_getfd(bev)));
+ bufferevent_free(bev);
+ }
+}
+
+static void
+sender_errorcb(struct bufferevent *bev, short what, void *ctx)
+{
+ TT_FAIL(("Got sender error %d",(int)what));
+}
+
+static int bufferevent_connect_test_flags = 0;
+static int bufferevent_trigger_test_flags = 0;
+static int n_strings_read = 0;
+static int n_reads_invoked = 0;
+static int n_events_invoked = 0;
+
+#define TEST_STR "Now is the time for all good events to signal for " \
+ "the good of their protocol"
+static void
+listen_cb(struct evconnlistener *listener, evutil_socket_t fd,
+ struct sockaddr *sa, int socklen, void *arg)
+{
+ struct event_base *base = arg;
+ struct bufferevent *bev;
+ const char s[] = TEST_STR;
+ TT_BLATHER(("Got a request on socket %d", (int)fd ));
+ bev = bufferevent_socket_new(base, fd, bufferevent_connect_test_flags);
+ tt_assert(bev);
+ bufferevent_setcb(bev, NULL, sender_writecb, sender_errorcb, NULL);
+ bufferevent_write(bev, s, sizeof(s));
+end:
+ ;
+}
+
+static int
+fake_listener_create(struct sockaddr_in *localhost)
+{
+ struct sockaddr *sa = (struct sockaddr *)localhost;
+ evutil_socket_t fd = -1;
+ ev_socklen_t slen = sizeof(*localhost);
+
+ memset(localhost, 0, sizeof(*localhost));
+ localhost->sin_port = 0; /* have the kernel pick a port */
+ localhost->sin_addr.s_addr = htonl(0x7f000001L);
+ localhost->sin_family = AF_INET;
+
+	/* Bind, but don't listen or accept: this should reliably trigger
+	   "Connection refused" on most platforms. */
+ fd = socket(localhost->sin_family, SOCK_STREAM, 0);
+ tt_assert(fd >= 0);
+ tt_assert(bind(fd, sa, slen) == 0);
+ tt_assert(getsockname(fd, sa, &slen) == 0);
+
+ return fd;
+
+end:
+ return -1;
+}
+
+static void
+reader_eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ struct event_base *base = ctx;
+ if (what & BEV_EVENT_ERROR) {
+ perror("foobar");
+ TT_FAIL(("got connector error %d", (int)what));
+ return;
+ }
+ if (what & BEV_EVENT_CONNECTED) {
+ TT_BLATHER(("connected on %d", (int)bufferevent_getfd(bev)));
+ bufferevent_enable(bev, EV_READ);
+ }
+ if (what & BEV_EVENT_EOF) {
+ char buf[512];
+ size_t n;
+ n = bufferevent_read(bev, buf, sizeof(buf)-1);
+ tt_int_op(n, >=, 0);
+ buf[n] = '\0';
+ tt_str_op(buf, ==, TEST_STR);
+ if (++n_strings_read == 2)
+ event_base_loopexit(base, NULL);
+ TT_BLATHER(("EOF on %d: %d strings read.",
+ (int)bufferevent_getfd(bev), n_strings_read));
+ }
+end:
+ ;
+}
+
+static void
+reader_eventcb_simple(struct bufferevent *bev, short what, void *ctx)
+{
+ TT_BLATHER(("Read eventcb simple invoked on %d.",
+ (int)bufferevent_getfd(bev)));
+ n_events_invoked++;
+}
+
+static void
+reader_readcb(struct bufferevent *bev, void *ctx)
+{
+ TT_BLATHER(("Read invoked on %d.", (int)bufferevent_getfd(bev)));
+ n_reads_invoked++;
+}
+
+static void
+test_bufferevent_connect(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct evconnlistener *lev=NULL;
+ struct bufferevent *bev1=NULL, *bev2=NULL;
+ struct sockaddr_in localhost;
+ struct sockaddr_storage ss;
+ struct sockaddr *sa;
+ ev_socklen_t slen;
+
+ int be_flags=BEV_OPT_CLOSE_ON_FREE;
+
+ if (strstr((char*)data->setup_data, "defer")) {
+ be_flags |= BEV_OPT_DEFER_CALLBACKS;
+ }
+ if (strstr((char*)data->setup_data, "unlocked")) {
+ be_flags |= BEV_OPT_UNLOCK_CALLBACKS;
+ }
+ if (strstr((char*)data->setup_data, "lock")) {
+ be_flags |= BEV_OPT_THREADSAFE;
+ }
+ bufferevent_connect_test_flags = be_flags;
+#ifdef _WIN32
+ if (!strcmp((char*)data->setup_data, "unset_connectex")) {
+ struct win32_extension_fns *ext =
+ (struct win32_extension_fns *)
+ event_get_win32_extension_fns_();
+ ext->ConnectEx = NULL;
+ }
+#endif
+
+ memset(&localhost, 0, sizeof(localhost));
+
+ localhost.sin_port = 0; /* pick-a-port */
+ localhost.sin_addr.s_addr = htonl(0x7f000001L);
+ localhost.sin_family = AF_INET;
+ sa = (struct sockaddr *)&localhost;
+ lev = evconnlistener_new_bind(data->base, listen_cb, data->base,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE,
+ 16, sa, sizeof(localhost));
+ tt_assert(lev);
+
+ sa = (struct sockaddr *)&ss;
+ slen = sizeof(ss);
+ if (regress_get_listener_addr(lev, sa, &slen) < 0) {
+ tt_abort_perror("getsockname");
+ }
+
+ tt_assert(!evconnlistener_enable(lev));
+ bev1 = bufferevent_socket_new(data->base, -1, be_flags);
+ bev2 = bufferevent_socket_new(data->base, -1, be_flags);
+ tt_assert(bev1);
+ tt_assert(bev2);
+ bufferevent_setcb(bev1, reader_readcb,NULL, reader_eventcb, data->base);
+ bufferevent_setcb(bev2, reader_readcb,NULL, reader_eventcb, data->base);
+
+ bufferevent_enable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+
+ tt_want(!bufferevent_socket_connect(bev1, sa, sizeof(localhost)));
+ tt_want(!bufferevent_socket_connect(bev2, sa, sizeof(localhost)));
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(n_strings_read, ==, 2);
+ tt_int_op(n_reads_invoked, >=, 2);
+end:
+ if (lev)
+ evconnlistener_free(lev);
+
+ if (bev1)
+ bufferevent_free(bev1);
+
+ if (bev2)
+ bufferevent_free(bev2);
+}
+
+static void
+test_bufferevent_connect_fail_eventcb(void *arg)
+{
+ struct basic_test_data *data = arg;
+ int flags = BEV_OPT_CLOSE_ON_FREE | (long)data->setup_data;
+ struct bufferevent *bev = NULL;
+ struct evconnlistener *lev = NULL;
+ struct sockaddr_in localhost;
+ ev_socklen_t slen = sizeof(localhost);
+ evutil_socket_t fake_listener = -1;
+
+ fake_listener = fake_listener_create(&localhost);
+
+ tt_int_op(n_events_invoked, ==, 0);
+
+ bev = bufferevent_socket_new(data->base, -1, flags);
+ tt_assert(bev);
+ bufferevent_setcb(bev, reader_readcb, reader_readcb,
+ reader_eventcb_simple, data->base);
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+ tt_int_op(n_events_invoked, ==, 0);
+ tt_int_op(n_reads_invoked, ==, 0);
+ /** @see also test_bufferevent_connect_fail() */
+ bufferevent_socket_connect(bev, (struct sockaddr *)&localhost, slen);
+ tt_int_op(n_events_invoked, ==, 0);
+ tt_int_op(n_reads_invoked, ==, 0);
+ event_base_dispatch(data->base);
+ tt_int_op(n_events_invoked, ==, 1);
+ tt_int_op(n_reads_invoked, ==, 0);
+
+end:
+ if (lev)
+ evconnlistener_free(lev);
+ if (bev)
+ bufferevent_free(bev);
+ if (fake_listener >= 0)
+ evutil_closesocket(fake_listener);
+}
+
+static void
+want_fail_eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ struct event_base *base = ctx;
+ const char *err;
+ evutil_socket_t s;
+
+ if (what & BEV_EVENT_ERROR) {
+ s = bufferevent_getfd(bev);
+ err = evutil_socket_error_to_string(evutil_socket_geterror(s));
+ TT_BLATHER(("connection failure on "EV_SOCK_FMT": %s",
+ EV_SOCK_ARG(s), err));
+ test_ok = 1;
+ } else {
+ TT_FAIL(("didn't fail? what %hd", what));
+ }
+
+ event_base_loopexit(base, NULL);
+}
+
+static void
+close_socket_cb(evutil_socket_t fd, short what, void *arg)
+{
+ evutil_socket_t *fdp = arg;
+ if (*fdp >= 0) {
+ evutil_closesocket(*fdp);
+ *fdp = -1;
+ }
+}
+
+static void
+test_bufferevent_connect_fail(void *arg)
+{
+ struct basic_test_data *data = (struct basic_test_data *)arg;
+ struct bufferevent *bev=NULL;
+ struct event close_listener_event;
+ int close_listener_event_added = 0;
+ struct timeval one_second = { 1, 0 };
+ struct sockaddr_in localhost;
+ ev_socklen_t slen = sizeof(localhost);
+ evutil_socket_t fake_listener = -1;
+ int r;
+
+ test_ok = 0;
+
+ fake_listener = fake_listener_create(&localhost);
+ bev = bufferevent_socket_new(data->base, -1,
+ BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
+ tt_assert(bev);
+ bufferevent_setcb(bev, NULL, NULL, want_fail_eventcb, data->base);
+
+ r = bufferevent_socket_connect(bev, (struct sockaddr *)&localhost, slen);
+ /* XXXX we'd like to test the '0' case everywhere, but FreeBSD
+ * detects the error immediately, which is not really wrong of it. */
+ tt_want(r == 0 || r == -1);
+
+ /* Close the listener socket after a second. This should trigger
+ "connection refused" on some other platforms, including OSX. */
+ evtimer_assign(&close_listener_event, data->base, close_socket_cb,
+ &fake_listener);
+ event_add(&close_listener_event, &one_second);
+ close_listener_event_added = 1;
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 1);
+
+end:
+ if (fake_listener >= 0)
+ evutil_closesocket(fake_listener);
+
+ if (bev)
+ bufferevent_free(bev);
+
+ if (close_listener_event_added)
+ event_del(&close_listener_event);
+}
+
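+/* Bookkeeping for the timeout tests: bev_timeout_event_cb records when and
+ * how often the read and write timeouts fire, and bev_timeout_write_cb
+ * records the last time the write callback ran. */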
+struct timeout_cb_result {
+ struct timeval read_timeout_at;
+ struct timeval write_timeout_at;
+ struct timeval last_wrote_at;
+ int n_read_timeouts;
+ int n_write_timeouts;
+ int total_calls;
+};
+
+static void
+bev_timeout_write_cb(struct bufferevent *bev, void *arg)
+{
+ struct timeout_cb_result *res = arg;
+ evutil_gettimeofday(&res->last_wrote_at, NULL);
+}
+
+static void
+bev_timeout_event_cb(struct bufferevent *bev, short what, void *arg)
+{
+ struct timeout_cb_result *res = arg;
+ ++res->total_calls;
+
+ if ((what & (BEV_EVENT_READING|BEV_EVENT_TIMEOUT))
+ == (BEV_EVENT_READING|BEV_EVENT_TIMEOUT)) {
+ evutil_gettimeofday(&res->read_timeout_at, NULL);
+ ++res->n_read_timeouts;
+ }
+ if ((what & (BEV_EVENT_WRITING|BEV_EVENT_TIMEOUT))
+ == (BEV_EVENT_WRITING|BEV_EVENT_TIMEOUT)) {
+ evutil_gettimeofday(&res->write_timeout_at, NULL);
+ ++res->n_write_timeouts;
+ }
+}
+
+static void
+test_bufferevent_timeouts(void *arg)
+{
+ /* "arg" is a string containing "pair" and/or "filter". */
+ struct bufferevent *bev1 = NULL, *bev2 = NULL;
+ struct basic_test_data *data = arg;
+ int use_pair = 0, use_filter = 0;
+ struct timeval tv_w, tv_r, started_at;
+ struct timeout_cb_result res1, res2;
+ char buf[1024];
+
+ memset(&res1, 0, sizeof(res1));
+ memset(&res2, 0, sizeof(res2));
+
+ if (strstr((char*)data->setup_data, "pair"))
+ use_pair = 1;
+ if (strstr((char*)data->setup_data, "filter"))
+ use_filter = 1;
+
+ if (use_pair) {
+ struct bufferevent *p[2];
+ tt_int_op(0, ==, bufferevent_pair_new(data->base, 0, p));
+ bev1 = p[0];
+ bev2 = p[1];
+ } else {
+ bev1 = bufferevent_socket_new(data->base, data->pair[0], 0);
+ bev2 = bufferevent_socket_new(data->base, data->pair[1], 0);
+ }
+
+ tt_assert(bev1);
+ tt_assert(bev2);
+
+ if (use_filter) {
+ struct bufferevent *bevf1, *bevf2;
+ bevf1 = bufferevent_filter_new(bev1, NULL, NULL,
+ BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+ bevf2 = bufferevent_filter_new(bev2, NULL, NULL,
+ BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+ tt_assert(bevf1);
+ tt_assert(bevf2);
+ bev1 = bevf1;
+ bev2 = bevf2;
+ }
+
+ /* Do this nice and early. */
+ bufferevent_disable(bev2, EV_READ);
+
+ /* bev1 will try to write and read. Both will time out. */
+ evutil_gettimeofday(&started_at, NULL);
+ tv_w.tv_sec = tv_r.tv_sec = 0;
+ tv_w.tv_usec = 100*1000;
+ tv_r.tv_usec = 150*1000;
+ bufferevent_setcb(bev1, NULL, bev_timeout_write_cb,
+ bev_timeout_event_cb, &res1);
+ bufferevent_setwatermark(bev1, EV_WRITE, 1024*1024+10, 0);
+ bufferevent_set_timeouts(bev1, &tv_r, &tv_w);
+ if (use_pair) {
+ /* For a pair, the fact that the other side isn't reading
+ * makes the writer stall */
+ bufferevent_write(bev1, "ABCDEFG", 7);
+ } else {
+ /* For a real socket, the kernel's TCP buffers can eat a
+ * fair number of bytes; make sure that at some point we
+ * have some bytes that will stall. */
+ struct evbuffer *output = bufferevent_get_output(bev1);
+ int i;
+ memset(buf, 0xbb, sizeof(buf));
+ for (i=0;i<1024;++i) {
+ evbuffer_add_reference(output, buf, sizeof(buf),
+ NULL, NULL);
+ }
+ }
+ bufferevent_enable(bev1, EV_READ|EV_WRITE);
+
+ /* bev2 has nothing to say, and isn't listening. */
+ bufferevent_setcb(bev2, NULL, bev_timeout_write_cb,
+ bev_timeout_event_cb, &res2);
+ tv_w.tv_sec = tv_r.tv_sec = 0;
+ tv_w.tv_usec = 200*1000;
+ tv_r.tv_usec = 100*1000;
+ bufferevent_set_timeouts(bev2, &tv_r, &tv_w);
+ bufferevent_enable(bev2, EV_WRITE);
+
+ tv_r.tv_sec = 0;
+ tv_r.tv_usec = 350000;
+
+ event_base_loopexit(data->base, &tv_r);
+ event_base_dispatch(data->base);
+
+ /* XXXX Test that actually reading or writing a little resets the
+ * timeouts. */
+
+ /* Each buf1 timeout happens, and happens only once. */
+ tt_want(res1.n_read_timeouts);
+ tt_want(res1.n_write_timeouts);
+ tt_want(res1.n_read_timeouts == 1);
+ tt_want(res1.n_write_timeouts == 1);
+
+ test_timeval_diff_eq(&started_at, &res1.read_timeout_at, 150);
+ test_timeval_diff_eq(&started_at, &res1.write_timeout_at, 100);
+
+end:
+ if (bev1)
+ bufferevent_free(bev1);
+ if (bev2)
+ bufferevent_free(bev2);
+}
+
+static void
+trigger_failure_cb(evutil_socket_t fd, short what, void *ctx)
+{
+ TT_FAIL(("The triggered callback did not fire or the machine is really slow (try increasing timeout)."));
+}
+
+static void
+trigger_eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ struct event_base *base = ctx;
+ if (what == ~0) {
+ TT_BLATHER(("Event successfully triggered."));
+ event_base_loopexit(base, NULL);
+ return;
+ }
+ reader_eventcb(bev, what, ctx);
+}
+
+static void
+trigger_readcb_triggered(struct bufferevent *bev, void *ctx)
+{
+ TT_BLATHER(("Read successfully triggered."));
+ n_reads_invoked++;
+ bufferevent_trigger_event(bev, ~0, bufferevent_trigger_test_flags);
+}
+
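+/* First read callback for the trigger test: raise the read watermark above
+ * the current input length so a plain bufferevent_trigger() produces no
+ * callback, then force it with BEV_TRIG_IGNORE_WATERMARKS (possibly
+ * deferred); trigger_failure_cb fires if nothing ever runs. */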
+static void
+trigger_readcb(struct bufferevent *bev, void *ctx)
+{
+ struct timeval timeout = { 30, 0 };
+ struct event_base *base = ctx;
+ size_t low, high, len;
+ int expected_reads;
+
+ TT_BLATHER(("Read invoked on %d.", (int)bufferevent_getfd(bev)));
+ expected_reads = ++n_reads_invoked;
+
+ bufferevent_setcb(bev, trigger_readcb_triggered, NULL, trigger_eventcb, ctx);
+
+ bufferevent_getwatermark(bev, EV_READ, &low, &high);
+ len = evbuffer_get_length(bufferevent_get_input(bev));
+
+ bufferevent_setwatermark(bev, EV_READ, len + 1, 0);
+ bufferevent_trigger(bev, EV_READ, bufferevent_trigger_test_flags);
+ /* no callback expected */
+ tt_int_op(n_reads_invoked, ==, expected_reads);
+
+ if ((bufferevent_trigger_test_flags & BEV_TRIG_DEFER_CALLBACKS) ||
+ (bufferevent_connect_test_flags & BEV_OPT_DEFER_CALLBACKS)) {
+ /* will be deferred */
+ } else {
+ expected_reads++;
+ }
+
+ event_base_once(base, -1, EV_TIMEOUT, trigger_failure_cb, NULL, &timeout);
+
+ bufferevent_trigger(bev, EV_READ,
+ bufferevent_trigger_test_flags | BEV_TRIG_IGNORE_WATERMARKS);
+ tt_int_op(n_reads_invoked, ==, expected_reads);
+
+ bufferevent_setwatermark(bev, EV_READ, low, high);
+end:
+ ;
+}
+
+static void
+test_bufferevent_trigger(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct evconnlistener *lev=NULL;
+ struct bufferevent *bev=NULL;
+ struct sockaddr_in localhost;
+ struct sockaddr_storage ss;
+ struct sockaddr *sa;
+ ev_socklen_t slen;
+
+ int be_flags=BEV_OPT_CLOSE_ON_FREE;
+ int trig_flags=0;
+
+ if (strstr((char*)data->setup_data, "defer")) {
+ be_flags |= BEV_OPT_DEFER_CALLBACKS;
+ }
+ bufferevent_connect_test_flags = be_flags;
+
+ if (strstr((char*)data->setup_data, "postpone")) {
+ trig_flags |= BEV_TRIG_DEFER_CALLBACKS;
+ }
+ bufferevent_trigger_test_flags = trig_flags;
+
+ memset(&localhost, 0, sizeof(localhost));
+
+ localhost.sin_port = 0; /* pick-a-port */
+ localhost.sin_addr.s_addr = htonl(0x7f000001L);
+ localhost.sin_family = AF_INET;
+ sa = (struct sockaddr *)&localhost;
+ lev = evconnlistener_new_bind(data->base, listen_cb, data->base,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE,
+ 16, sa, sizeof(localhost));
+ tt_assert(lev);
+
+ sa = (struct sockaddr *)&ss;
+ slen = sizeof(ss);
+ if (regress_get_listener_addr(lev, sa, &slen) < 0) {
+ tt_abort_perror("getsockname");
+ }
+
+ tt_assert(!evconnlistener_enable(lev));
+ bev = bufferevent_socket_new(data->base, -1, be_flags);
+ tt_assert(bev);
+ bufferevent_setcb(bev, trigger_readcb, NULL, trigger_eventcb, data->base);
+
+ bufferevent_enable(bev, EV_READ);
+
+ tt_want(!bufferevent_socket_connect(bev, sa, sizeof(localhost)));
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(n_reads_invoked, ==, 2);
+end:
+ if (lev)
+ evconnlistener_free(lev);
+
+ if (bev)
+ bufferevent_free(bev);
+}
+
+static void
+test_bufferevent_socket_filter_inactive(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev = NULL, *bevf = NULL;
+
+ bev = bufferevent_socket_new(data->base, -1, 0);
+ tt_assert(bev);
+ bevf = bufferevent_filter_new(bev, NULL, NULL, 0, NULL, NULL);
+ tt_assert(bevf);
+
+end:
+ if (bevf)
+ bufferevent_free(bevf);
+ if (bev)
+ bufferevent_free(bev);
+}
+
+
+struct testcase_t bufferevent_testcases[] = {
+
+ LEGACY(bufferevent, TT_ISOLATED),
+ LEGACY(bufferevent_pair, TT_ISOLATED),
+ LEGACY(bufferevent_flush_normal, TT_ISOLATED),
+ LEGACY(bufferevent_flush_flush, TT_ISOLATED),
+ LEGACY(bufferevent_flush_finished, TT_ISOLATED),
+ LEGACY(bufferevent_pair_flush_normal, TT_ISOLATED),
+ LEGACY(bufferevent_pair_flush_flush, TT_ISOLATED),
+ LEGACY(bufferevent_pair_flush_finished, TT_ISOLATED),
+#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
+ { "bufferevent_pair_release_lock", test_bufferevent_pair_release_lock,
+ TT_FORK|TT_ISOLATED|TT_NEED_THREADS|TT_NEED_BASE|TT_LEGACY,
+ &basic_setup, NULL },
+#endif
+ LEGACY(bufferevent_watermarks, TT_ISOLATED),
+ LEGACY(bufferevent_pair_watermarks, TT_ISOLATED),
+ LEGACY(bufferevent_filters, TT_ISOLATED),
+ LEGACY(bufferevent_pair_filters, TT_ISOLATED),
+ { "bufferevent_connect", test_bufferevent_connect, TT_FORK|TT_NEED_BASE,
+ &basic_setup, (void*)"" },
+ { "bufferevent_connect_defer", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"defer" },
+ { "bufferevent_connect_lock", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup, (void*)"lock" },
+ { "bufferevent_connect_lock_defer", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup,
+ (void*)"defer lock" },
+ { "bufferevent_connect_unlocked_cbs", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup,
+ (void*)"lock defer unlocked" },
+ { "bufferevent_connect_fail", test_bufferevent_connect_fail,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "bufferevent_timeout", test_bufferevent_timeouts,
+ TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR, &basic_setup, (void*)"" },
+ { "bufferevent_timeout_pair", test_bufferevent_timeouts,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"pair" },
+ { "bufferevent_timeout_filter", test_bufferevent_timeouts,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"filter" },
+ { "bufferevent_timeout_filter_pair", test_bufferevent_timeouts,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"filter pair" },
+ { "bufferevent_trigger", test_bufferevent_trigger, TT_FORK|TT_NEED_BASE,
+ &basic_setup, (void*)"" },
+ { "bufferevent_trigger_defer", test_bufferevent_trigger,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)"defer" },
+ { "bufferevent_trigger_postpone", test_bufferevent_trigger,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup,
+ (void*)"postpone" },
+ { "bufferevent_trigger_defer_postpone", test_bufferevent_trigger,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS, &basic_setup,
+ (void*)"defer postpone" },
+#ifdef EVENT__HAVE_LIBZ
+ LEGACY(bufferevent_zlib, TT_ISOLATED),
+#else
+ { "bufferevent_zlib", NULL, TT_SKIP, NULL, NULL },
+#endif
+
+ { "bufferevent_connect_fail_eventcb_defer",
+ test_bufferevent_connect_fail_eventcb,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (void*)BEV_OPT_DEFER_CALLBACKS },
+ { "bufferevent_connect_fail_eventcb",
+ test_bufferevent_connect_fail_eventcb,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+
+ { "bufferevent_socket_filter_inactive",
+ test_bufferevent_socket_filter_inactive,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+
+ END_OF_TESTCASES,
+};
+
+struct testcase_t bufferevent_iocp_testcases[] = {
+
+ LEGACY(bufferevent, TT_ISOLATED|TT_ENABLE_IOCP),
+ LEGACY(bufferevent_flush_normal, TT_ISOLATED),
+ LEGACY(bufferevent_flush_flush, TT_ISOLATED),
+ LEGACY(bufferevent_flush_finished, TT_ISOLATED),
+ LEGACY(bufferevent_watermarks, TT_ISOLATED|TT_ENABLE_IOCP),
+ LEGACY(bufferevent_filters, TT_ISOLATED|TT_ENABLE_IOCP),
+ { "bufferevent_connect", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup, (void*)"" },
+ { "bufferevent_connect_defer", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup, (void*)"defer" },
+ { "bufferevent_connect_lock", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS|TT_ENABLE_IOCP, &basic_setup,
+ (void*)"lock" },
+ { "bufferevent_connect_lock_defer", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_NEED_THREADS|TT_ENABLE_IOCP, &basic_setup,
+ (void*)"defer lock" },
+ { "bufferevent_connect_fail", test_bufferevent_connect_fail,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup, NULL },
+ { "bufferevent_connect_nonblocking", test_bufferevent_connect,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup,
+ (void*)"unset_connectex" },
+
+ { "bufferevent_connect_fail_eventcb_defer",
+ test_bufferevent_connect_fail_eventcb,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup,
+ (void*)BEV_OPT_DEFER_CALLBACKS },
+ { "bufferevent_connect_fail",
+ test_bufferevent_connect_fail_eventcb,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup, NULL },
+
+ END_OF_TESTCASES,
+};
diff --git a/libs/libevent/docs/test/regress_dns.c b/libs/libevent/docs/test/regress_dns.c
new file mode 100644
index 0000000000..1873636245
--- /dev/null
+++ b/libs/libevent/docs/test/regress_dns.c
@@ -0,0 +1,2151 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <ws2tcpip.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/dns.h"
+#include "event2/dns_compat.h"
+#include "event2/dns_struct.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/event_struct.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "event2/bufferevent.h"
+#include "log-internal.h"
+#include "regress.h"
+#include "regress_testutils.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+
+static int dns_ok = 0;
+static int dns_got_cancel = 0;
+static int dns_err = 0;
+
+
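+/* Shared callback for the legacy evdns_resolve_* tests: record the answer
+ * type in dns_ok (or the error in dns_err on timeout) and exit the loop. */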
+static void
+dns_gethostbyname_cb(int result, char type, int count, int ttl,
+ void *addresses, void *arg)
+{
+ dns_ok = dns_err = 0;
+
+ if (result == DNS_ERR_TIMEOUT) {
+ printf("[Timed out] ");
+ dns_err = result;
+ goto out;
+ }
+
+ if (result != DNS_ERR_NONE) {
+ printf("[Error code %d] ", result);
+ goto out;
+ }
+
+ TT_BLATHER(("type: %d, count: %d, ttl: %d: ", type, count, ttl));
+
+ switch (type) {
+ case DNS_IPv6_AAAA: {
+#if defined(EVENT__HAVE_STRUCT_IN6_ADDR) && defined(EVENT__HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+ struct in6_addr *in6_addrs = addresses;
+ char buf[INET6_ADDRSTRLEN+1];
+ int i;
+ /* a resolution that's not valid does not help */
+ if (ttl < 0)
+ goto out;
+ for (i = 0; i < count; ++i) {
+ const char *b = evutil_inet_ntop(AF_INET6, &in6_addrs[i], buf,sizeof(buf));
+ if (b)
+ TT_BLATHER(("%s ", b));
+ else
+ TT_BLATHER(("%s ", strerror(errno)));
+ }
+#endif
+ break;
+ }
+ case DNS_IPv4_A: {
+ struct in_addr *in_addrs = addresses;
+ int i;
+ /* a resolution that's not valid does not help */
+ if (ttl < 0)
+ goto out;
+ for (i = 0; i < count; ++i)
+ TT_BLATHER(("%s ", inet_ntoa(in_addrs[i])));
+ break;
+ }
+ case DNS_PTR:
+ /* may get at most one PTR */
+ if (count != 1)
+ goto out;
+
+ TT_BLATHER(("%s ", *(char **)addresses));
+ break;
+ default:
+ goto out;
+ }
+
+ dns_ok = type;
+
+out:
+ if (arg == NULL)
+ event_loopexit(NULL);
+ else
+ event_base_loopexit((struct event_base *)arg, NULL);
+}
+
+static void
+dns_gethostbyname(void)
+{
+ dns_ok = 0;
+ evdns_resolve_ipv4("www.monkey.org", 0, dns_gethostbyname_cb, NULL);
+ event_dispatch();
+
+ tt_int_op(dns_ok, ==, DNS_IPv4_A);
+ test_ok = dns_ok;
+end:
+ ;
+}
+
+static void
+dns_gethostbyname6(void)
+{
+ dns_ok = 0;
+ evdns_resolve_ipv6("www.ietf.org", 0, dns_gethostbyname_cb, NULL);
+ event_dispatch();
+
+ if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) {
+ tt_skip();
+ }
+
+ tt_int_op(dns_ok, ==, DNS_IPv6_AAAA);
+ test_ok = 1;
+end:
+ ;
+}
+
+static void
+dns_gethostbyaddr(void)
+{
+ struct in_addr in;
+ in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */
+ dns_ok = 0;
+ evdns_resolve_reverse(&in, 0, dns_gethostbyname_cb, NULL);
+ event_dispatch();
+
+ tt_int_op(dns_ok, ==, DNS_PTR);
+ test_ok = dns_ok;
+end:
+ ;
+}
+
+static void
+dns_resolve_reverse(void *ptr)
+{
+ struct in_addr in;
+ struct event_base *base = event_base_new();
+ struct evdns_base *dns = evdns_base_new(base, 1/* init name servers */);
+ struct evdns_request *req = NULL;
+
+ tt_assert(base);
+ tt_assert(dns);
+ in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */
+ dns_ok = 0;
+
+ req = evdns_base_resolve_reverse(
+ dns, &in, 0, dns_gethostbyname_cb, base);
+ tt_assert(req);
+
+ event_base_dispatch(base);
+
+ tt_int_op(dns_ok, ==, DNS_PTR);
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (base)
+ event_base_free(base);
+}
+
+static int n_server_responses = 0;
+
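+/* Request handler for the toy DNS server used by dns_server(): return
+ * canned A, AAAA, and PTR answers for the names the test queries, drop
+ * "drop.example.com", and clear dns_ok on anything unexpected. */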
+static void
+dns_server_request_cb(struct evdns_server_request *req, void *data)
+{
+ int i, r;
+ const char TEST_ARPA[] = "11.11.168.192.in-addr.arpa";
+ const char TEST_IN6[] =
+ "f.e.f.e." "0.0.0.0." "0.0.0.0." "1.1.1.1."
+ "a.a.a.a." "0.0.0.0." "0.0.0.0." "0.f.f.f.ip6.arpa";
+
+ for (i = 0; i < req->nquestions; ++i) {
+ const int qtype = req->questions[i]->type;
+ const int qclass = req->questions[i]->dns_question_class;
+ const char *qname = req->questions[i]->name;
+
+ struct in_addr ans;
+ ans.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+ if (qtype == EVDNS_TYPE_A &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, "zz.example.com")) {
+ r = evdns_server_request_add_a_reply(req, qname,
+ 1, &ans.s_addr, 12345);
+ if (r<0)
+ dns_ok = 0;
+ } else if (qtype == EVDNS_TYPE_AAAA &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, "zz.example.com")) {
+ char addr6[17] = "abcdefghijklmnop";
+ r = evdns_server_request_add_aaaa_reply(req,
+ qname, 1, addr6, 123);
+ if (r<0)
+ dns_ok = 0;
+ } else if (qtype == EVDNS_TYPE_PTR &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, TEST_ARPA)) {
+ r = evdns_server_request_add_ptr_reply(req, NULL,
+ qname, "ZZ.EXAMPLE.COM", 54321);
+ if (r<0)
+ dns_ok = 0;
+ } else if (qtype == EVDNS_TYPE_PTR &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, TEST_IN6)){
+ r = evdns_server_request_add_ptr_reply(req, NULL,
+ qname,
+ "ZZ-INET6.EXAMPLE.COM", 54322);
+ if (r<0)
+ dns_ok = 0;
+ } else if (qtype == EVDNS_TYPE_A &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, "drop.example.com")) {
+ if (evdns_server_request_drop(req)<0)
+ dns_ok = 0;
+ return;
+ } else {
+ printf("Unexpected question %d %d \"%s\" ",
+ qtype, qclass, qname);
+ dns_ok = 0;
+ }
+ }
+ r = evdns_server_request_respond(req, 0);
+ if (r<0) {
+ printf("Couldn't send reply. ");
+ dns_ok = 0;
+ }
+}
+
+static void
+dns_server_gethostbyname_cb(int result, char type, int count, int ttl,
+ void *addresses, void *arg)
+{
+ if (result == DNS_ERR_CANCEL) {
+ if (arg != (void*)(char*)90909) {
+ printf("Unexpected cancelation");
+ dns_ok = 0;
+ }
+ dns_got_cancel = 1;
+ goto out;
+ }
+ if (result != DNS_ERR_NONE) {
+ printf("Unexpected result %d. ", result);
+ dns_ok = 0;
+ goto out;
+ }
+ if (count != 1) {
+ printf("Unexpected answer count %d. ", count);
+ dns_ok = 0;
+ goto out;
+ }
+ switch (type) {
+ case DNS_IPv4_A: {
+ struct in_addr *in_addrs = addresses;
+ if (in_addrs[0].s_addr != htonl(0xc0a80b0bUL) || ttl != 12345) {
+ printf("Bad IPv4 response \"%s\" %d. ",
+ inet_ntoa(in_addrs[0]), ttl);
+ dns_ok = 0;
+ goto out;
+ }
+ break;
+ }
+ case DNS_IPv6_AAAA: {
+#if defined (EVENT__HAVE_STRUCT_IN6_ADDR) && defined(EVENT__HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+ struct in6_addr *in6_addrs = addresses;
+ char buf[INET6_ADDRSTRLEN+1];
+ if (memcmp(&in6_addrs[0].s6_addr, "abcdefghijklmnop", 16)
+ || ttl != 123) {
+ const char *b = evutil_inet_ntop(AF_INET6, &in6_addrs[0],buf,sizeof(buf));
+ printf("Bad IPv6 response \"%s\" %d. ", b, ttl);
+ dns_ok = 0;
+ goto out;
+ }
+#endif
+ break;
+ }
+ case DNS_PTR: {
+ char **addrs = addresses;
+ if (arg != (void*)6) {
+ if (strcmp(addrs[0], "ZZ.EXAMPLE.COM") ||
+ ttl != 54321) {
+ printf("Bad PTR response \"%s\" %d. ",
+ addrs[0], ttl);
+ dns_ok = 0;
+ goto out;
+ }
+ } else {
+ if (strcmp(addrs[0], "ZZ-INET6.EXAMPLE.COM") ||
+ ttl != 54322) {
+ printf("Bad ipv6 PTR response \"%s\" %d. ",
+ addrs[0], ttl);
+ dns_ok = 0;
+ goto out;
+ }
+ }
+ break;
+ }
+ default:
+ printf("Bad response type %d. ", type);
+ dns_ok = 0;
+ }
+ out:
+ if (++n_server_responses == 3) {
+ event_loopexit(NULL);
+ }
+}
+
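+/* Exercise the evdns server API end to end: bind a UDP socket on
+ * 127.0.0.1, serve it with dns_server_request_cb, add it as our only
+ * nameserver, then issue A, AAAA, and both reverse lookups plus one
+ * request that gets cancelled. */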
+static void
+dns_server(void)
+{
+ evutil_socket_t sock=-1;
+ struct sockaddr_in my_addr;
+ struct sockaddr_storage ss;
+ ev_socklen_t slen;
+ struct evdns_server_port *port=NULL;
+ struct in_addr resolve_addr;
+ struct in6_addr resolve_addr6;
+ struct evdns_base *base=NULL;
+ struct evdns_request *req=NULL;
+
+ dns_ok = 1;
+
+ base = evdns_base_new(NULL, 0);
+
+ /* Now configure a nameserver port. */
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock<0) {
+ tt_abort_perror("socket");
+ }
+
+ evutil_make_socket_nonblocking(sock);
+
+ memset(&my_addr, 0, sizeof(my_addr));
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_port = 0; /* kernel picks */
+ my_addr.sin_addr.s_addr = htonl(0x7f000001UL);
+ if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr)) < 0) {
+ tt_abort_perror("bind");
+ }
+ slen = sizeof(ss);
+ if (getsockname(sock, (struct sockaddr*)&ss, &slen) < 0) {
+ tt_abort_perror("getsockname");
+ }
+
+ port = evdns_add_server_port(sock, 0, dns_server_request_cb, NULL);
+
+ /* Add ourself as the only nameserver, and make sure we really are
+ * the only nameserver. */
+ evdns_base_nameserver_sockaddr_add(base, (struct sockaddr*)&ss, slen, 0);
+ tt_int_op(evdns_base_count_nameservers(base), ==, 1);
+ {
+ struct sockaddr_storage ss2;
+ int slen2;
+
+ memset(&ss2, 0, sizeof(ss2));
+
+ slen2 = evdns_base_get_nameserver_addr(base, 0, (struct sockaddr *)&ss2, 3);
+ tt_int_op(slen2, ==, slen);
+ tt_int_op(ss2.ss_family, ==, 0);
+ slen2 = evdns_base_get_nameserver_addr(base, 0, (struct sockaddr *)&ss2, sizeof(ss2));
+ tt_int_op(slen2, ==, slen);
+ tt_mem_op(&ss2, ==, &ss, slen);
+
+ slen2 = evdns_base_get_nameserver_addr(base, 1, (struct sockaddr *)&ss2, sizeof(ss2));
+ tt_int_op(-1, ==, slen2);
+ }
+
+ /* Send some queries. */
+ evdns_base_resolve_ipv4(base, "zz.example.com", DNS_QUERY_NO_SEARCH,
+ dns_server_gethostbyname_cb, NULL);
+ evdns_base_resolve_ipv6(base, "zz.example.com", DNS_QUERY_NO_SEARCH,
+ dns_server_gethostbyname_cb, NULL);
+ resolve_addr.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+ evdns_base_resolve_reverse(base, &resolve_addr, 0,
+ dns_server_gethostbyname_cb, NULL);
+ memcpy(resolve_addr6.s6_addr,
+ "\xff\xf0\x00\x00\x00\x00\xaa\xaa"
+ "\x11\x11\x00\x00\x00\x00\xef\xef", 16);
+ evdns_base_resolve_reverse_ipv6(base, &resolve_addr6, 0,
+ dns_server_gethostbyname_cb, (void*)6);
+
+ req = evdns_base_resolve_ipv4(base,
+ "drop.example.com", DNS_QUERY_NO_SEARCH,
+ dns_server_gethostbyname_cb, (void*)(char*)90909);
+
+ evdns_cancel_request(base, req);
+
+ event_dispatch();
+
+ tt_assert(dns_got_cancel);
+ test_ok = dns_ok;
+
+end:
+ if (port)
+ evdns_close_server_port(port);
+ if (sock >= 0)
+ evutil_closesocket(sock);
+ if (base)
+ evdns_base_free(base, 0);
+}
+
+static int n_replies_left;
+static struct event_base *exit_base;
+static struct evdns_server_port *exit_port;
+
+struct generic_dns_callback_result {
+ int result;
+ char type;
+ int count;
+ int ttl;
+ size_t addrs_len;
+ void *addrs;
+ char addrs_buf[256];
+};
+
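+/* Generic resolver callback: copy the answer into the caller's
+ * generic_dns_callback_result (address data truncated at 256 bytes) and,
+ * once the last expected reply arrives, close exit_port or exit the loop
+ * on exit_base. */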
+static void
+generic_dns_callback(int result, char type, int count, int ttl, void *addresses,
+ void *arg)
+{
+ size_t len;
+ struct generic_dns_callback_result *res = arg;
+ res->result = result;
+ res->type = type;
+ res->count = count;
+ res->ttl = ttl;
+
+ if (type == DNS_IPv4_A)
+ len = count * 4;
+ else if (type == DNS_IPv6_AAAA)
+ len = count * 16;
+ else if (type == DNS_PTR)
+ len = strlen(addresses)+1;
+ else {
+ res->addrs_len = len = 0;
+ res->addrs = NULL;
+ }
+ if (len) {
+ res->addrs_len = len;
+ if (len > 256)
+ len = 256;
+ memcpy(res->addrs_buf, addresses, len);
+ res->addrs = res->addrs_buf;
+ }
+
+ --n_replies_left;
+ if (n_replies_left == 0) {
+ if (exit_port) {
+ evdns_close_server_port(exit_port);
+ exit_port = NULL;
+ } else
+ event_base_loopexit(exit_base, NULL);
+ }
+}
+
+static struct regress_dns_server_table search_table[] = {
+ { "host.a.example.com", "err", "3", 0, 0 },
+ { "host.b.example.com", "err", "3", 0, 0 },
+ { "host.c.example.com", "A", "11.22.33.44", 0, 0 },
+ { "host2.a.example.com", "err", "3", 0, 0 },
+ { "host2.b.example.com", "A", "200.100.0.100", 0, 0 },
+ { "host2.c.example.com", "err", "3", 0, 0 },
+ { "hostn.a.example.com", "errsoa", "0", 0, 0 },
+ { "hostn.b.example.com", "errsoa", "3", 0, 0 },
+ { "hostn.c.example.com", "err", "0", 0, 0 },
+
+ { "host", "err", "3", 0, 0 },
+ { "host2", "err", "3", 0, 0 },
+ { "*", "err", "3", 0, 0 },
+ { NULL, NULL, NULL, 0, 0 }
+};
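+/* In the table above, "err 3" is an NXDOMAIN answer, so bare "host" and
+ * "host2" lookups can only succeed via the search suffixes added in the
+ * test; the "errsoa" entries return errors carrying an SOA record, which
+ * lets the test distinguish DNS_ERR_NODATA from DNS_ERR_NOTEXIST. */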
+static void
+dns_search_test_impl(void *arg, int lower)
+{
+ struct regress_dns_server_table table[ARRAY_SIZE(search_table)];
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_base *dns = NULL;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+
+ struct generic_dns_callback_result r[8];
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(table); ++i) {
+ table[i] = search_table[i];
+ table[i].lower = lower;
+ }
+
+ tt_assert(regress_dnsserver(base, &portnum, table));
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, 0);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+
+ evdns_base_search_add(dns, "a.example.com");
+ evdns_base_search_add(dns, "b.example.com");
+ evdns_base_search_add(dns, "c.example.com");
+
+ n_replies_left = ARRAY_SIZE(r);
+ exit_base = base;
+
+ evdns_base_resolve_ipv4(dns, "host", 0, generic_dns_callback, &r[0]);
+ evdns_base_resolve_ipv4(dns, "host2", 0, generic_dns_callback, &r[1]);
+ evdns_base_resolve_ipv4(dns, "host", DNS_NO_SEARCH, generic_dns_callback, &r[2]);
+ evdns_base_resolve_ipv4(dns, "host2", DNS_NO_SEARCH, generic_dns_callback, &r[3]);
+ evdns_base_resolve_ipv4(dns, "host3", 0, generic_dns_callback, &r[4]);
+ evdns_base_resolve_ipv4(dns, "hostn.a.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[5]);
+ evdns_base_resolve_ipv4(dns, "hostn.b.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[6]);
+ evdns_base_resolve_ipv4(dns, "hostn.c.example.com", DNS_NO_SEARCH, generic_dns_callback, &r[7]);
+
+ event_base_dispatch(base);
+
+ tt_int_op(r[0].type, ==, DNS_IPv4_A);
+ tt_int_op(r[0].count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r[0].addrs)[0], ==, htonl(0x0b16212c));
+ tt_int_op(r[1].type, ==, DNS_IPv4_A);
+ tt_int_op(r[1].count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r[1].addrs)[0], ==, htonl(0xc8640064));
+ tt_int_op(r[2].result, ==, DNS_ERR_NOTEXIST);
+ tt_int_op(r[3].result, ==, DNS_ERR_NOTEXIST);
+ tt_int_op(r[4].result, ==, DNS_ERR_NOTEXIST);
+ tt_int_op(r[5].result, ==, DNS_ERR_NODATA);
+ tt_int_op(r[5].ttl, ==, 42);
+ tt_int_op(r[6].result, ==, DNS_ERR_NOTEXIST);
+ tt_int_op(r[6].ttl, ==, 42);
+ tt_int_op(r[7].result, ==, DNS_ERR_NODATA);
+ tt_int_op(r[7].ttl, ==, 0);
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+
+ regress_clean_dnsserver();
+}
+static void
+dns_search_test(void *arg)
+{
+ dns_search_test_impl(arg, 0);
+}
+static void
+dns_search_lower_test(void *arg)
+{
+ dns_search_test_impl(arg, 1);
+}
+
+static int request_count = 0;
+static struct evdns_request *current_req = NULL;
+
+static void
+search_cancel_server_cb(struct evdns_server_request *req, void *data)
+{
+ const char *question;
+
+ if (req->nquestions != 1)
+ TT_DIE(("Only handling one question at a time; got %d",
+ req->nquestions));
+
+ question = req->questions[0]->name;
+
+ TT_BLATHER(("got question, %s", question));
+
+ tt_assert(request_count > 0);
+ tt_assert(!evdns_server_request_respond(req, 3));
+
+ if (!--request_count)
+ evdns_cancel_request(NULL, current_req);
+
+end:
+ ;
+}
+
+static void
+dns_search_cancel_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_base *dns = NULL;
+ struct evdns_server_port *port = NULL;
+ ev_uint16_t portnum = 0;
+ struct generic_dns_callback_result r1;
+ char buf[64];
+
+ port = regress_get_dnsserver(base, &portnum, NULL,
+ search_cancel_server_cb, NULL);
+ tt_assert(port);
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, 0);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+
+ evdns_base_search_add(dns, "a.example.com");
+ evdns_base_search_add(dns, "b.example.com");
+ evdns_base_search_add(dns, "c.example.com");
+ evdns_base_search_add(dns, "d.example.com");
+
+ exit_base = base;
+ request_count = 3;
+ n_replies_left = 1;
+
+ current_req = evdns_base_resolve_ipv4(dns, "host", 0,
+ generic_dns_callback, &r1);
+ event_base_dispatch(base);
+
+ tt_int_op(r1.result, ==, DNS_ERR_CANCEL);
+
+end:
+ if (port)
+ evdns_close_server_port(port);
+ if (dns)
+ evdns_base_free(dns, 0);
+}
+
+static void
+fail_server_cb(struct evdns_server_request *req, void *data)
+{
+ const char *question;
+ int *count = data;
+ struct in_addr in;
+
+ /* Drop the first N requests that we get. */
+ if (*count > 0) {
+ --*count;
+ tt_want(! evdns_server_request_drop(req));
+ return;
+ }
+
+ if (req->nquestions != 1)
+ TT_DIE(("Only handling one question at a time; got %d",
+ req->nquestions));
+
+ question = req->questions[0]->name;
+
+ if (!evutil_ascii_strcasecmp(question, "google.com")) {
+ /* Detect a probe, and get out of the loop. */
+ event_base_loopexit(exit_base, NULL);
+ }
+
+ tt_assert(evutil_inet_pton(AF_INET, "16.32.64.128", &in));
+ evdns_server_request_add_a_reply(req, question, 1, &in.s_addr,
+ 100);
+ tt_assert(! evdns_server_request_respond(req, 0));
+ return;
+end:
+ tt_want(! evdns_server_request_drop(req));
+}
+
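+/* Retry tests: fail_server_cb drops the first drop_count requests, so the
+ * resolver must time out and retransmit before it gets the 16.32.64.128
+ * answer; the second half of dns_retry_test_impl lowers max-timeouts so
+ * the server is marked failed and must be revived by the "google.com"
+ * probe query. */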
+static void
+dns_retry_test_impl(void *arg, int flags)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_server_port *port = NULL;
+ struct evdns_base *dns = NULL;
+ int drop_count = 2;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+
+ struct generic_dns_callback_result r1;
+
+ port = regress_get_dnsserver(base, &portnum, NULL,
+ fail_server_cb, &drop_count);
+ tt_assert(port);
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, flags);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+ tt_assert(! evdns_base_set_option(dns, "timeout", "0.2"));
+ tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "10"));
+ tt_assert(! evdns_base_set_option(dns, "initial-probe-timeout", "0.1"));
+
+ evdns_base_resolve_ipv4(dns, "host.example.com", 0,
+ generic_dns_callback, &r1);
+
+ n_replies_left = 1;
+ exit_base = base;
+
+ event_base_dispatch(base);
+
+ tt_int_op(drop_count, ==, 0);
+
+ tt_int_op(r1.type, ==, DNS_IPv4_A);
+ tt_int_op(r1.count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0x10204080));
+
+ /* Now try again, but this time have the server get treated as
+ * failed, so we can send it a test probe. */
+ drop_count = 4;
+ tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "2"));
+ tt_assert(! evdns_base_set_option(dns, "attempts:", "3"));
+ memset(&r1, 0, sizeof(r1));
+
+ evdns_base_resolve_ipv4(dns, "host.example.com", 0,
+ generic_dns_callback, &r1);
+
+ n_replies_left = 2;
+
+ /* This will run until it answers the "google.com" probe request. */
+ event_base_dispatch(base);
+
+ /* We'll treat the server as failed here. */
+ tt_int_op(r1.result, ==, DNS_ERR_TIMEOUT);
+
+ /* It should work this time. */
+ tt_int_op(drop_count, ==, 0);
+ evdns_base_resolve_ipv4(dns, "host.example.com", 0,
+ generic_dns_callback, &r1);
+
+ event_base_dispatch(base);
+ tt_int_op(r1.result, ==, DNS_ERR_NONE);
+ tt_int_op(r1.type, ==, DNS_IPv4_A);
+ tt_int_op(r1.count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0x10204080));
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (port)
+ evdns_close_server_port(port);
+}
+static void
+dns_retry_test(void *arg)
+{
+ dns_retry_test_impl(arg, 0);
+}
+static void
+dns_retry_disable_when_inactive_test(void *arg)
+{
+ dns_retry_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+}
+
+static struct regress_dns_server_table internal_error_table[] = {
+ /* Error 4 (NOTIMPL) makes us reissue the request to another server
+ if we can.
+
+ XXXX we should reissue under a much wider set of circumstances!
+ */
+ { "foof.example.com", "err", "4", 0, 0 },
+ { NULL, NULL, NULL, 0, 0 }
+};
+
+static struct regress_dns_server_table reissue_table[] = {
+ { "foof.example.com", "A", "240.15.240.15", 0, 0 },
+ { NULL, NULL, NULL, 0, 0 }
+};
+
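+/* The reissue tests resolve foof.example.com against the NOTIMPL server
+ * above and expect the query to be reissued to the second nameserver,
+ * which answers from reissue_table. */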
+static void
+dns_reissue_test_impl(void *arg, int flags)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_server_port *port1 = NULL, *port2 = NULL;
+ struct evdns_base *dns = NULL;
+ struct generic_dns_callback_result r1;
+ ev_uint16_t portnum1 = 0, portnum2=0;
+ char buf1[64], buf2[64];
+
+ port1 = regress_get_dnsserver(base, &portnum1, NULL,
+ regress_dns_server_cb, internal_error_table);
+ tt_assert(port1);
+ port2 = regress_get_dnsserver(base, &portnum2, NULL,
+ regress_dns_server_cb, reissue_table);
+ tt_assert(port2);
+ evutil_snprintf(buf1, sizeof(buf1), "127.0.0.1:%d", (int)portnum1);
+ evutil_snprintf(buf2, sizeof(buf2), "127.0.0.1:%d", (int)portnum2);
+
+ dns = evdns_base_new(base, flags);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf1));
+ tt_assert(! evdns_base_set_option(dns, "timeout:", "0.3"));
+ tt_assert(! evdns_base_set_option(dns, "max-timeouts:", "2"));
+ tt_assert(! evdns_base_set_option(dns, "attempts:", "5"));
+
+ memset(&r1, 0, sizeof(r1));
+ evdns_base_resolve_ipv4(dns, "foof.example.com", 0,
+ generic_dns_callback, &r1);
+
+ /* Add this after, so that we are sure to get a reissue. */
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf2));
+
+ n_replies_left = 1;
+ exit_base = base;
+
+ event_base_dispatch(base);
+ tt_int_op(r1.result, ==, DNS_ERR_NONE);
+ tt_int_op(r1.type, ==, DNS_IPv4_A);
+ tt_int_op(r1.count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r1.addrs)[0], ==, htonl(0xf00ff00f));
+
+ /* Make sure we dropped at least once. */
+ tt_int_op(internal_error_table[0].seen, >, 0);
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (port1)
+ evdns_close_server_port(port1);
+ if (port2)
+ evdns_close_server_port(port2);
+}
+static void
+dns_reissue_test(void *arg)
+{
+ dns_reissue_test_impl(arg, 0);
+}
+static void
+dns_reissue_disable_when_inactive_test(void *arg)
+{
+ dns_reissue_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+}
+
+#if 0
+static void
+dumb_bytes_fn(char *p, size_t n)
+{
+ unsigned i;
+ /* This gets us 6 bits of entropy per transaction ID, which means we
+ * will probably have collisions and need to pick again. */
+ for (i=0;i<n;++i)
+ p[i] = (char)(rand() & 7);
+}
+#endif
+
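+/* Inflight tests: with max-inflight set to 3, queue 20 identical lookups
+ * and check that each one still gets the 240.15.240.15 answer. */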
+static void
+dns_inflight_test_impl(void *arg, int flags)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_base *dns = NULL;
+ struct evdns_server_port *dns_port = NULL;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+ int disable_when_inactive = flags & EVDNS_BASE_DISABLE_WHEN_INACTIVE;
+
+ struct generic_dns_callback_result r[20];
+ int i;
+
+ dns_port = regress_get_dnsserver(base, &portnum, NULL,
+ regress_dns_server_cb, reissue_table);
+ tt_assert(dns_port);
+ if (disable_when_inactive) {
+ exit_port = dns_port;
+ }
+
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, flags);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+ tt_assert(! evdns_base_set_option(dns, "max-inflight:", "3"));
+ tt_assert(! evdns_base_set_option(dns, "randomize-case:", "0"));
+
+ for (i=0;i<20;++i)
+ evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r[i]);
+
+ n_replies_left = 20;
+ exit_base = base;
+
+ event_base_dispatch(base);
+
+ for (i=0;i<20;++i) {
+ tt_int_op(r[i].type, ==, DNS_IPv4_A);
+ tt_int_op(r[i].count, ==, 1);
+ tt_int_op(((ev_uint32_t*)r[i].addrs)[0], ==, htonl(0xf00ff00f));
+ }
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (exit_port) {
+ evdns_close_server_port(exit_port);
+ exit_port = NULL;
+ } else if (! disable_when_inactive) {
+ evdns_close_server_port(dns_port);
+ }
+}
+
+static void
+dns_inflight_test(void *arg)
+{
+ dns_inflight_test_impl(arg, 0);
+}
+
+static void
+dns_disable_when_inactive_test(void *arg)
+{
+ dns_inflight_test_impl(arg, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+}
+
+static void
+dns_disable_when_inactive_no_ns_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base, *inactive_base;
+ struct evdns_base *dns = NULL;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+ struct generic_dns_callback_result r;
+
+ inactive_base = event_base_new();
+ tt_assert(inactive_base);
+
+ /** Create dns server with inactive base, to avoid replying to clients */
+ tt_assert(regress_dnsserver(inactive_base, &portnum, search_table));
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+ tt_assert(! evdns_base_set_option(dns, "timeout:", "0.1"));
+
+ evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r);
+ n_replies_left = 1;
+ exit_base = base;
+
+ event_base_dispatch(base);
+
+ tt_int_op(n_replies_left, ==, 0);
+
+ tt_int_op(r.result, ==, DNS_ERR_TIMEOUT);
+ tt_int_op(r.count, ==, 0);
+ tt_ptr_op(r.addrs, ==, NULL);
+
+end:
+ if (dns)
+ evdns_base_free(dns, 0);
+ regress_clean_dnsserver();
+ if (inactive_base)
+ event_base_free(inactive_base);
+}
+
+/* === Test for bufferevent_socket_connect_hostname */
+
+static int total_connected_or_failed = 0;
+static int total_n_accepted = 0;
+static struct event_base *be_connect_hostname_base = NULL;
+
+/* Implements a DNS server for the connect_hostname test and the
+ * getaddrinfo_async test */
+static void
+be_getaddrinfo_server_cb(struct evdns_server_request *req, void *data)
+{
+ int i;
+ int *n_got_p=data;
+ int added_any=0;
+ ++*n_got_p;
+
+ for (i=0;i<req->nquestions;++i) {
+ const int qtype = req->questions[i]->type;
+ const int qclass = req->questions[i]->dns_question_class;
+ const char *qname = req->questions[i]->name;
+ struct in_addr ans;
+ struct in6_addr ans6;
+ memset(&ans6, 0, sizeof(ans6));
+
+ TT_BLATHER(("Got question about %s, type=%d", qname, qtype));
+
+ if (qtype == EVDNS_TYPE_A &&
+ qclass == EVDNS_CLASS_INET &&
+ !evutil_ascii_strcasecmp(qname, "nobodaddy.example.com")) {
+ ans.s_addr = htonl(0x7f000001);
+ evdns_server_request_add_a_reply(req, qname,
+ 1, &ans.s_addr, 2000);
+ added_any = 1;
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "nosuchplace.example.com")) {
+ /* ok, just say notfound. */
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "both.example.com")) {
+ if (qtype == EVDNS_TYPE_A) {
+ ans.s_addr = htonl(0x50502020);
+ evdns_server_request_add_a_reply(req, qname,
+ 1, &ans.s_addr, 2000);
+ added_any = 1;
+ } else if (qtype == EVDNS_TYPE_AAAA) {
+ ans6.s6_addr[0] = 0x80;
+ ans6.s6_addr[1] = 0xff;
+ ans6.s6_addr[14] = 0xbb;
+ ans6.s6_addr[15] = 0xbb;
+ evdns_server_request_add_aaaa_reply(req, qname,
+ 1, &ans6.s6_addr, 2000);
+ added_any = 1;
+ }
+ evdns_server_request_add_cname_reply(req, qname,
+ "both-canonical.example.com", 1000);
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v4only.example.com") ||
+ !evutil_ascii_strcasecmp(qname, "v4assert.example.com")) {
+ if (qtype == EVDNS_TYPE_A) {
+ ans.s_addr = htonl(0x12345678);
+ evdns_server_request_add_a_reply(req, qname,
+ 1, &ans.s_addr, 2000);
+ added_any = 1;
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v4assert.example.com")) {
+ TT_FAIL(("Got an AAAA request for v4assert"));
+ }
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v6only.example.com") ||
+ !evutil_ascii_strcasecmp(qname, "v6assert.example.com")) {
+ if (qtype == EVDNS_TYPE_AAAA) {
+ ans6.s6_addr[0] = 0x0b;
+ ans6.s6_addr[1] = 0x0b;
+ ans6.s6_addr[14] = 0xf0;
+ ans6.s6_addr[15] = 0x0d;
+ evdns_server_request_add_aaaa_reply(req, qname,
+ 1, &ans6.s6_addr, 2000);
+ added_any = 1;
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v6assert.example.com")) {
+ TT_FAIL(("Got a A request for v6assert"));
+ }
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v6timeout.example.com")) {
+ if (qtype == EVDNS_TYPE_A) {
+ ans.s_addr = htonl(0xabcdef01);
+ evdns_server_request_add_a_reply(req, qname,
+ 1, &ans.s_addr, 2000);
+ added_any = 1;
+ } else if (qtype == EVDNS_TYPE_AAAA) {
+ /* Let the v6 request time out.*/
+ evdns_server_request_drop(req);
+ return;
+ }
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v4timeout.example.com")) {
+ if (qtype == EVDNS_TYPE_AAAA) {
+ ans6.s6_addr[0] = 0x0a;
+ ans6.s6_addr[1] = 0x0a;
+ ans6.s6_addr[14] = 0xff;
+ ans6.s6_addr[15] = 0x01;
+ evdns_server_request_add_aaaa_reply(req, qname,
+ 1, &ans6.s6_addr, 2000);
+ added_any = 1;
+ } else if (qtype == EVDNS_TYPE_A) {
+ /* Let the v4 request time out.*/
+ evdns_server_request_drop(req);
+ return;
+ }
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "v6timeout-nonexist.example.com")) {
+ if (qtype == EVDNS_TYPE_A) {
+ /* Fall through; give a nexist (NXDOMAIN) reply. */
+ } else if (qtype == EVDNS_TYPE_AAAA) {
+ /* Let the v6 request time out.*/
+ evdns_server_request_drop(req);
+ return;
+ }
+ } else if (!evutil_ascii_strcasecmp(qname,
+ "all-timeout.example.com")) {
+ /* drop all requests */
+ evdns_server_request_drop(req);
+ return;
+ } else {
+ TT_GRIPE(("Got weird request for %s",qname));
+ }
+ }
+ if (added_any) {
+ TT_BLATHER(("answering"));
+ evdns_server_request_respond(req, 0);
+ } else {
+ TT_BLATHER(("saying nexist."));
+ evdns_server_request_respond(req, 3);
+ }
+}
+
+/* Implements a listener for connect_hostname test. */
+static void
+nil_accept_cb(struct evconnlistener *l, evutil_socket_t fd, struct sockaddr *s,
+ int socklen, void *arg)
+{
+ int *p = arg;
+ (*p)++;
+ ++total_n_accepted;
+ /* don't do anything with the socket; let it close when we exit() */
+ if (total_n_accepted >= 3 && total_connected_or_failed >= 5)
+ event_base_loopexit(be_connect_hostname_base,
+ NULL);
+}
+
+struct be_conn_hostname_result {
+ int dnserr;
+ int what;
+};
+
+/* Bufferevent event callback for the connect_hostname test: remembers what
+ * event we got. */
+static void
+be_connect_hostname_event_cb(struct bufferevent *bev, short what, void *ctx)
+{
+ struct be_conn_hostname_result *got = ctx;
+ if (!got->what) {
+ TT_BLATHER(("Got a bufferevent event %d", what));
+ got->what = what;
+
+ if ((what & BEV_EVENT_CONNECTED) || (what & BEV_EVENT_ERROR)) {
+ int r;
+ if ((r = bufferevent_socket_get_dns_error(bev))) {
+ got->dnserr = r;
+ TT_BLATHER(("DNS error %d: %s", r,
+ evutil_gai_strerror(r)));
+ }
+ ++total_connected_or_failed;
+ TT_BLATHER(("Got %d connections or errors.", total_connected_or_failed));
+
+ if (total_n_accepted >= 3 && total_connected_or_failed >= 5)
+ event_base_loopexit(be_connect_hostname_base,
+ NULL);
+ }
+ } else {
+ TT_FAIL(("Two events on one bufferevent. %d,%d",
+ got->what, (int)what));
+ }
+}
+
+static void
+test_bufferevent_connect_hostname(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct evconnlistener *listener = NULL;
+ struct bufferevent *be1=NULL, *be2=NULL, *be3=NULL, *be4=NULL, *be5=NULL;
+ struct be_conn_hostname_result be1_outcome={0,0}, be2_outcome={0,0},
+ be3_outcome={0,0}, be4_outcome={0,0}, be5_outcome={0,0};
+ int expect_err5;
+ struct evdns_base *dns=NULL;
+ struct evdns_server_port *port=NULL;
+ struct sockaddr_in sin;
+ int listener_port=-1;
+ ev_uint16_t dns_port=0;
+ int n_accept=0, n_dns=0;
+ char buf[128];
+
+ be_connect_hostname_base = data->base;
+
+ /* Bind an address and figure out what port it's on. */
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
+ sin.sin_port = 0;
+ listener = evconnlistener_new_bind(data->base, nil_accept_cb,
+ &n_accept,
+ LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC,
+ -1, (struct sockaddr *)&sin, sizeof(sin));
+ tt_assert(listener);
+ listener_port = regress_get_socket_port(
+ evconnlistener_get_fd(listener));
+
+ port = regress_get_dnsserver(data->base, &dns_port, NULL,
+ be_getaddrinfo_server_cb, &n_dns);
+ tt_assert(port);
+ tt_int_op(dns_port, >=, 0);
+
+ /* Start an evdns_base that uses the server as its resolver. */
+ dns = evdns_base_new(data->base, 0);
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)dns_port);
+ evdns_base_nameserver_ip_add(dns, buf);
+
+ /* Now, finally, at long last, launch the bufferevents. One should do
+ * a failing hostname lookup, one a successful connect by IP literal,
+ * one a successful async lookup by hostname, and two should use the
+ * blocking resolver. */
+ be1 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
+ be2 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
+ be3 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
+ be4 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
+ be5 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
+
+ bufferevent_setcb(be1, NULL, NULL, be_connect_hostname_event_cb,
+ &be1_outcome);
+ bufferevent_setcb(be2, NULL, NULL, be_connect_hostname_event_cb,
+ &be2_outcome);
+ bufferevent_setcb(be3, NULL, NULL, be_connect_hostname_event_cb,
+ &be3_outcome);
+ bufferevent_setcb(be4, NULL, NULL, be_connect_hostname_event_cb,
+ &be4_outcome);
+ bufferevent_setcb(be5, NULL, NULL, be_connect_hostname_event_cb,
+ &be5_outcome);
+
+ /* Launch an async resolve that will fail. */
+ tt_assert(!bufferevent_socket_connect_hostname(be1, dns, AF_INET,
+ "nosuchplace.example.com", listener_port));
+ /* Connect to the IP without resolving. */
+ tt_assert(!bufferevent_socket_connect_hostname(be2, dns, AF_INET,
+ "127.0.0.1", listener_port));
+ /* Launch an async resolve that will succeed. */
+ tt_assert(!bufferevent_socket_connect_hostname(be3, dns, AF_INET,
+ "nobodaddy.example.com", listener_port));
+ /* Use the blocking resolver. This one will fail if your resolver
+ * can't resolve localhost to 127.0.0.1 */
+ tt_assert(!bufferevent_socket_connect_hostname(be4, NULL, AF_INET,
+ "localhost", listener_port));
+ /* Use the blocking resolver with a nonexistent hostname. */
+ tt_assert(!bufferevent_socket_connect_hostname(be5, NULL, AF_INET,
+ "nonesuch.nowhere.example.com", 80));
+ {
+ /* The blocking resolver will use the system nameserver, which
+ * might tell us anything. (Yes, some twits even pretend that
+ * example.com is real.) Let's see what answer to expect. */
+ struct evutil_addrinfo hints, *ai = NULL;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_INET;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_protocol = IPPROTO_TCP;
+ expect_err5 = evutil_getaddrinfo(
+ "nonesuch.nowhere.example.com", "80", &hints, &ai);
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(be1_outcome.what, ==, BEV_EVENT_ERROR);
+ tt_int_op(be1_outcome.dnserr, ==, EVUTIL_EAI_NONAME);
+ tt_int_op(be2_outcome.what, ==, BEV_EVENT_CONNECTED);
+ tt_int_op(be2_outcome.dnserr, ==, 0);
+ tt_int_op(be3_outcome.what, ==, BEV_EVENT_CONNECTED);
+ tt_int_op(be3_outcome.dnserr, ==, 0);
+ tt_int_op(be4_outcome.what, ==, BEV_EVENT_CONNECTED);
+ tt_int_op(be4_outcome.dnserr, ==, 0);
+ if (expect_err5) {
+ tt_int_op(be5_outcome.what, ==, BEV_EVENT_ERROR);
+ tt_int_op(be5_outcome.dnserr, ==, expect_err5);
+ }
+
+ tt_int_op(n_accept, ==, 3);
+ tt_int_op(n_dns, ==, 2);
+
+end:
+ if (listener)
+ evconnlistener_free(listener);
+ if (port)
+ evdns_close_server_port(port);
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (be1)
+ bufferevent_free(be1);
+ if (be2)
+ bufferevent_free(be2);
+ if (be3)
+ bufferevent_free(be3);
+ if (be4)
+ bufferevent_free(be4);
+ if (be5)
+ bufferevent_free(be5);
+}
+
+
+struct gai_outcome {
+ int err;
+ struct evutil_addrinfo *ai;
+};
+
+static int n_gai_results_pending = 0;
+static struct event_base *exit_base_on_no_pending_results = NULL;
+
+static void
+gai_cb(int err, struct evutil_addrinfo *res, void *ptr)
+{
+ struct gai_outcome *go = ptr;
+ go->err = err;
+ go->ai = res;
+ if (--n_gai_results_pending <= 0 && exit_base_on_no_pending_results)
+ event_base_loopexit(exit_base_on_no_pending_results, NULL);
+ if (n_gai_results_pending < 900)
+ TT_BLATHER(("Got an answer; expecting %d more.",
+ n_gai_results_pending));
+}
+
+static void
+cancel_gai_cb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct evdns_getaddrinfo_request *r = ptr;
+ evdns_getaddrinfo_cancel(r);
+}
+
+static void
+test_getaddrinfo_async(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct evutil_addrinfo hints, *a;
+ struct gai_outcome local_outcome;
+ struct gai_outcome a_out[12];
+ int i;
+ struct evdns_getaddrinfo_request *r;
+ char buf[128];
+ struct evdns_server_port *port = NULL;
+ ev_uint16_t dns_port = 0;
+ int n_dns_questions = 0;
+ struct evdns_base *dns_base;
+
+ memset(a_out, 0, sizeof(a_out));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+
+ dns_base = evdns_base_new(data->base, 0);
+ tt_assert(dns_base);
+
+ /* for localhost */
+ evdns_base_load_hosts(dns_base, NULL);
+
+ tt_assert(! evdns_base_set_option(dns_base, "timeout", "0.3"));
+ tt_assert(! evdns_base_set_option(dns_base, "getaddrinfo-allow-skew", "0.2"));
+
+ n_gai_results_pending = 10000; /* don't think about exiting yet. */
+
+ /* 1. Try some cases that will never hit the asynchronous resolver. */
+ /* 1a. Simple case with a symbolic service name */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ r = evdns_getaddrinfo(dns_base, "1.2.3.4", "http",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(! r);
+ if (!local_outcome.err) {
+ tt_ptr_op(local_outcome.ai,!=,NULL);
+ test_ai_eq(local_outcome.ai, "1.2.3.4:80", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+ } else {
+ TT_BLATHER(("Apparently we have no getservbyname."));
+ }
+
+ /* 1b. EVUTIL_AI_NUMERICHOST is set */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags = EVUTIL_AI_NUMERICHOST;
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ r = evdns_getaddrinfo(dns_base, "www.google.com", "80",
+ &hints, gai_cb, &local_outcome);
+ tt_ptr_op(r,==,NULL);
+ tt_int_op(local_outcome.err,==,EVUTIL_EAI_NONAME);
+ tt_ptr_op(local_outcome.ai,==,NULL);
+
+ /* 1c. We give a numeric address (ipv6) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_protocol = IPPROTO_TCP;
+ r = evdns_getaddrinfo(dns_base, "f::f", "8008",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(!r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ tt_ptr_op(local_outcome.ai->ai_next,==,NULL);
+ test_ai_eq(local_outcome.ai, "[f::f]:8008", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+ /* 1d. We give a numeric address (ipv4) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_UNSPEC;
+ r = evdns_getaddrinfo(dns_base, "5.6.7.8", NULL,
+ &hints, gai_cb, &local_outcome);
+ tt_assert(!r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ a = ai_find_by_protocol(local_outcome.ai, IPPROTO_TCP);
+ tt_assert(a);
+ test_ai_eq(a, "5.6.7.8", SOCK_STREAM, IPPROTO_TCP);
+ a = ai_find_by_protocol(local_outcome.ai, IPPROTO_UDP);
+ tt_assert(a);
+ test_ai_eq(a, "5.6.7.8", SOCK_DGRAM, IPPROTO_UDP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+ /* 1e. nodename is NULL (bind) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_flags = EVUTIL_AI_PASSIVE;
+ r = evdns_getaddrinfo(dns_base, NULL, "9090",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(!r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ /* we should get a v4 address of 0.0.0.0... */
+ a = ai_find_by_family(local_outcome.ai, PF_INET);
+ tt_assert(a);
+ test_ai_eq(a, "0.0.0.0:9090", SOCK_DGRAM, IPPROTO_UDP);
+ /* ... and a v6 address of ::0 */
+ a = ai_find_by_family(local_outcome.ai, PF_INET6);
+ tt_assert(a);
+ test_ai_eq(a, "[::]:9090", SOCK_DGRAM, IPPROTO_UDP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+ /* 1f. nodename is NULL (connect) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evdns_getaddrinfo(dns_base, NULL, "2",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(!r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ /* we should get a v4 address of 127.0.0.1 .... */
+ a = ai_find_by_family(local_outcome.ai, PF_INET);
+ tt_assert(a);
+ test_ai_eq(a, "127.0.0.1:2", SOCK_STREAM, IPPROTO_TCP);
+ /* ... and a v6 address of ::1 */
+ a = ai_find_by_family(local_outcome.ai, PF_INET6);
+ tt_assert(a);
+ test_ai_eq(a, "[::1]:2", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+ /* 1g. We find localhost immediately. (pf_unspec) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evdns_getaddrinfo(dns_base, "LOCALHOST", "80",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(!r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ /* we should get a v4 address of 127.0.0.1 .... */
+ a = ai_find_by_family(local_outcome.ai, PF_INET);
+ tt_assert(a);
+ test_ai_eq(a, "127.0.0.1:80", SOCK_STREAM, IPPROTO_TCP);
+ /* ... and a v6 address of ::1 */
+ a = ai_find_by_family(local_outcome.ai, PF_INET6);
+ tt_assert(a);
+ test_ai_eq(a, "[::1]:80", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+	/* 1h. We find localhost immediately. (pf_inet6) */
+ memset(&hints, 0, sizeof(hints));
+ memset(&local_outcome, 0, sizeof(local_outcome));
+ hints.ai_family = PF_INET6;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evdns_getaddrinfo(dns_base, "LOCALHOST", "9999",
+ &hints, gai_cb, &local_outcome);
+ tt_assert(! r);
+ tt_int_op(local_outcome.err,==,0);
+ tt_assert(local_outcome.ai);
+ a = local_outcome.ai;
+ test_ai_eq(a, "[::1]:9999", SOCK_STREAM, IPPROTO_TCP);
+ tt_ptr_op(a->ai_next, ==, NULL);
+ evutil_freeaddrinfo(local_outcome.ai);
+ local_outcome.ai = NULL;
+
+ /* 2. Okay, now we can actually test the asynchronous resolver. */
+ /* Start a dummy local dns server... */
+ port = regress_get_dnsserver(data->base, &dns_port, NULL,
+ be_getaddrinfo_server_cb, &n_dns_questions);
+ tt_assert(port);
+ tt_int_op(dns_port, >=, 0);
+ /* ... and tell the evdns_base about it. */
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", dns_port);
+ evdns_base_nameserver_ip_add(dns_base, buf);
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = EVUTIL_AI_CANONNAME;
+ /* 0: Request for both.example.com should return both addresses. */
+ r = evdns_getaddrinfo(dns_base, "both.example.com", "8000",
+ &hints, gai_cb, &a_out[0]);
+ tt_assert(r);
+
+ /* 1: Request for v4only.example.com should return one address. */
+ r = evdns_getaddrinfo(dns_base, "v4only.example.com", "8001",
+ &hints, gai_cb, &a_out[1]);
+ tt_assert(r);
+
+ /* 2: Request for v6only.example.com should return one address. */
+ hints.ai_flags = 0;
+ r = evdns_getaddrinfo(dns_base, "v6only.example.com", "8002",
+ &hints, gai_cb, &a_out[2]);
+ tt_assert(r);
+
+ /* 3: PF_INET request for v4assert.example.com should not generate a
+ * v6 request. The server will fail the test if it does. */
+ hints.ai_family = PF_INET;
+ r = evdns_getaddrinfo(dns_base, "v4assert.example.com", "8003",
+ &hints, gai_cb, &a_out[3]);
+ tt_assert(r);
+
+ /* 4: PF_INET6 request for v6assert.example.com should not generate a
+ * v4 request. The server will fail the test if it does. */
+ hints.ai_family = PF_INET6;
+ r = evdns_getaddrinfo(dns_base, "v6assert.example.com", "8004",
+ &hints, gai_cb, &a_out[4]);
+ tt_assert(r);
+
+ /* 5: PF_INET request for nosuchplace.example.com should give NEXIST. */
+ hints.ai_family = PF_INET;
+ r = evdns_getaddrinfo(dns_base, "nosuchplace.example.com", "8005",
+ &hints, gai_cb, &a_out[5]);
+ tt_assert(r);
+
+ /* 6: PF_UNSPEC request for nosuchplace.example.com should give NEXIST.
+ */
+ hints.ai_family = PF_UNSPEC;
+ r = evdns_getaddrinfo(dns_base, "nosuchplace.example.com", "8006",
+ &hints, gai_cb, &a_out[6]);
+ tt_assert(r);
+
+ /* 7: PF_UNSPEC request for v6timeout.example.com should give an ipv4
+ * address only. */
+ hints.ai_family = PF_UNSPEC;
+ r = evdns_getaddrinfo(dns_base, "v6timeout.example.com", "8007",
+ &hints, gai_cb, &a_out[7]);
+ tt_assert(r);
+
+ /* 8: PF_UNSPEC request for v6timeout-nonexist.example.com should give
+ * a NEXIST */
+ hints.ai_family = PF_UNSPEC;
+ r = evdns_getaddrinfo(dns_base, "v6timeout-nonexist.example.com",
+ "8008", &hints, gai_cb, &a_out[8]);
+ tt_assert(r);
+
+ /* 9: AI_ADDRCONFIG should at least not crash. Can't test it more
+ * without knowing what kind of internet we have. */
+ hints.ai_flags |= EVUTIL_AI_ADDRCONFIG;
+ r = evdns_getaddrinfo(dns_base, "both.example.com",
+ "8009", &hints, gai_cb, &a_out[9]);
+ tt_assert(r);
+
+ /* 10: PF_UNSPEC for v4timeout.example.com should give an ipv6 address
+ * only. */
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags = 0;
+ r = evdns_getaddrinfo(dns_base, "v4timeout.example.com", "8010",
+ &hints, gai_cb, &a_out[10]);
+ tt_assert(r);
+
+	/* 11: all-timeout.example.com: cancel it after 100 msec. */
+ r = evdns_getaddrinfo(dns_base, "all-timeout.example.com", "8011",
+ &hints, gai_cb, &a_out[11]);
+ tt_assert(r);
+ {
+ struct timeval tv;
+ tv.tv_sec = 0;
+ tv.tv_usec = 100*1000; /* 100 msec */
+ event_base_once(data->base, -1, EV_TIMEOUT, cancel_gai_cb,
+ r, &tv);
+ }
+
+ /* XXXXX There are more tests we could do, including:
+
+ - A test to elicit NODATA.
+
+ */
+
+ n_gai_results_pending = 12;
+ exit_base_on_no_pending_results = data->base;
+
+ event_base_dispatch(data->base);
+
+ /* 0: both.example.com */
+ tt_int_op(a_out[0].err, ==, 0);
+ tt_assert(a_out[0].ai);
+ tt_assert(a_out[0].ai->ai_next);
+ tt_assert(!a_out[0].ai->ai_next->ai_next);
+ a = ai_find_by_family(a_out[0].ai, PF_INET);
+ tt_assert(a);
+ test_ai_eq(a, "80.80.32.32:8000", SOCK_STREAM, IPPROTO_TCP);
+ a = ai_find_by_family(a_out[0].ai, PF_INET6);
+ tt_assert(a);
+ test_ai_eq(a, "[80ff::bbbb]:8000", SOCK_STREAM, IPPROTO_TCP);
+ tt_assert(a_out[0].ai->ai_canonname);
+ tt_str_op(a_out[0].ai->ai_canonname, ==, "both-canonical.example.com");
+
+ /* 1: v4only.example.com */
+ tt_int_op(a_out[1].err, ==, 0);
+ tt_assert(a_out[1].ai);
+ tt_assert(! a_out[1].ai->ai_next);
+ test_ai_eq(a_out[1].ai, "18.52.86.120:8001", SOCK_STREAM, IPPROTO_TCP);
+ tt_assert(a_out[1].ai->ai_canonname == NULL);
+
+
+ /* 2: v6only.example.com */
+ tt_int_op(a_out[2].err, ==, 0);
+ tt_assert(a_out[2].ai);
+ tt_assert(! a_out[2].ai->ai_next);
+ test_ai_eq(a_out[2].ai, "[b0b::f00d]:8002", SOCK_STREAM, IPPROTO_TCP);
+
+ /* 3: v4assert.example.com */
+ tt_int_op(a_out[3].err, ==, 0);
+ tt_assert(a_out[3].ai);
+ tt_assert(! a_out[3].ai->ai_next);
+ test_ai_eq(a_out[3].ai, "18.52.86.120:8003", SOCK_STREAM, IPPROTO_TCP);
+
+ /* 4: v6assert.example.com */
+ tt_int_op(a_out[4].err, ==, 0);
+ tt_assert(a_out[4].ai);
+ tt_assert(! a_out[4].ai->ai_next);
+ test_ai_eq(a_out[4].ai, "[b0b::f00d]:8004", SOCK_STREAM, IPPROTO_TCP);
+
+ /* 5: nosuchplace.example.com (inet) */
+ tt_int_op(a_out[5].err, ==, EVUTIL_EAI_NONAME);
+ tt_assert(! a_out[5].ai);
+
+ /* 6: nosuchplace.example.com (unspec) */
+ tt_int_op(a_out[6].err, ==, EVUTIL_EAI_NONAME);
+ tt_assert(! a_out[6].ai);
+
+ /* 7: v6timeout.example.com */
+ tt_int_op(a_out[7].err, ==, 0);
+ tt_assert(a_out[7].ai);
+ tt_assert(! a_out[7].ai->ai_next);
+ test_ai_eq(a_out[7].ai, "171.205.239.1:8007", SOCK_STREAM, IPPROTO_TCP);
+
+ /* 8: v6timeout-nonexist.example.com */
+ tt_int_op(a_out[8].err, ==, EVUTIL_EAI_NONAME);
+ tt_assert(! a_out[8].ai);
+
+ /* 9: both (ADDRCONFIG) */
+ tt_int_op(a_out[9].err, ==, 0);
+ tt_assert(a_out[9].ai);
+ a = ai_find_by_family(a_out[9].ai, PF_INET);
+ if (a)
+ test_ai_eq(a, "80.80.32.32:8009", SOCK_STREAM, IPPROTO_TCP);
+ else
+ tt_assert(ai_find_by_family(a_out[9].ai, PF_INET6));
+ a = ai_find_by_family(a_out[9].ai, PF_INET6);
+ if (a)
+ test_ai_eq(a, "[80ff::bbbb]:8009", SOCK_STREAM, IPPROTO_TCP);
+ else
+ tt_assert(ai_find_by_family(a_out[9].ai, PF_INET));
+
+ /* 10: v4timeout.example.com */
+ tt_int_op(a_out[10].err, ==, 0);
+ tt_assert(a_out[10].ai);
+ tt_assert(! a_out[10].ai->ai_next);
+ test_ai_eq(a_out[10].ai, "[a0a::ff01]:8010", SOCK_STREAM, IPPROTO_TCP);
+
+ /* 11: cancelled request. */
+ tt_int_op(a_out[11].err, ==, EVUTIL_EAI_CANCEL);
+ tt_assert(a_out[11].ai == NULL);
+
+end:
+ if (local_outcome.ai)
+ evutil_freeaddrinfo(local_outcome.ai);
+ for (i=0;i<(int)ARRAY_SIZE(a_out);++i) {
+ if (a_out[i].ai)
+ evutil_freeaddrinfo(a_out[i].ai);
+ }
+ if (port)
+ evdns_close_server_port(port);
+ if (dns_base)
+ evdns_base_free(dns_base, 0);
+}
+
+struct gaic_request_status {
+ int magic;
+ struct event_base *base;
+ struct evdns_base *dns_base;
+ struct evdns_getaddrinfo_request *request;
+ struct event cancel_event;
+ int canceled;
+};
+
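+/* Magic value stored in each request's status struct; the callbacks assert
+ * on it so that a freed or corrupted struct is caught immediately. */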
+#define GAIC_MAGIC 0x1234abcd
+
+static int pending = 0;
+
+static void
+gaic_cancel_request_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct gaic_request_status *status = arg;
+
+ tt_assert(status->magic == GAIC_MAGIC);
+ status->canceled = 1;
+ evdns_getaddrinfo_cancel(status->request);
+ return;
+end:
+ event_base_loopexit(status->base, NULL);
+}
+
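+/* Dummy DNS server callback: answers every question with a single A record
+ * for 127.0.0.1 (TTL 100), or responds with REFUSED if the assertion fails. */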
+static void
+gaic_server_cb(struct evdns_server_request *req, void *arg)
+{
+ ev_uint32_t answer = 0x7f000001;
+ tt_assert(req->nquestions);
+ evdns_server_request_add_a_reply(req, req->questions[0]->name, 1,
+ &answer, 100);
+ evdns_server_request_respond(req, 0);
+ return;
+end:
+ evdns_server_request_respond(req, DNS_ERR_REFUSED);
+}
+
+
+static void
+gaic_getaddrinfo_cb(int result, struct evutil_addrinfo *res, void *arg)
+{
+ struct gaic_request_status *status = arg;
+ struct event_base *base = status->base;
+ tt_assert(status->magic == GAIC_MAGIC);
+
+ if (result == EVUTIL_EAI_CANCEL) {
+ tt_assert(status->canceled);
+ }
+ event_del(&status->cancel_event);
+
+ memset(status, 0xf0, sizeof(*status));
+ free(status);
+
+end:
+ if (--pending <= 0)
+ event_base_loopexit(base, NULL);
+}
+
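+/* Launch one getaddrinfo request and arm a 10 msec timer that cancels it,
+ * so that cancellation races against normal completion. */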
+static void
+gaic_launch(struct event_base *base, struct evdns_base *dns_base)
+{
+ struct gaic_request_status *status = calloc(1,sizeof(*status));
+ struct timeval tv = { 0, 10000 };
+ status->magic = GAIC_MAGIC;
+ status->base = base;
+ status->dns_base = dns_base;
+ event_assign(&status->cancel_event, base, -1, 0, gaic_cancel_request_cb,
+ status);
+ status->request = evdns_getaddrinfo(dns_base,
+ "foobar.bazquux.example.com", "80", NULL, gaic_getaddrinfo_cb,
+ status);
+ event_add(&status->cancel_event, &tv);
+ ++pending;
+}
+
+#ifdef EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED
+/* FIXME: We should move this to regress_main.c if anything else needs it. */
+
+/* Trivial replacements for malloc/free/realloc to check for memory leaks.
+ * Not threadsafe. */
+static int allocated_chunks = 0;
+
+static void *
+cnt_malloc(size_t sz)
+{
+ allocated_chunks += 1;
+ return malloc(sz);
+}
+
+static void *
+cnt_realloc(void *old, size_t sz)
+{
+ if (!old)
+ allocated_chunks += 1;
+ if (!sz)
+ allocated_chunks -= 1;
+ return realloc(old, sz);
+}
+
+static void
+cnt_free(void *ptr)
+{
+ allocated_chunks -= 1;
+ free(ptr);
+}
+
+struct testleak_env_t {
+ struct event_base *base;
+ struct evdns_base *dns_base;
+ struct evdns_request *req;
+ struct generic_dns_callback_result r;
+};
+
+static void *
+testleak_setup(const struct testcase_t *testcase)
+{
+ struct testleak_env_t *env;
+
+ allocated_chunks = 0;
+
+	/* Shut down libevent's global state so that allocation counting starts
+	 * from a clean slate; otherwise freeing memory allocated before the
+	 * counters were installed would drive allocated_chunks negative
+	 * (a false positive). */
+ libevent_global_shutdown();
+
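+	/* The replacement allocators must be installed before libevent makes
+	 * any allocation we want counted. */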
+ event_set_mem_functions(cnt_malloc, cnt_realloc, cnt_free);
+
+ event_enable_debug_mode();
+
+ /* not mm_calloc: we don't want to mess with the count. */
+ env = calloc(1, sizeof(struct testleak_env_t));
+ env->base = event_base_new();
+ env->dns_base = evdns_base_new(env->base, 0);
+ env->req = evdns_base_resolve_ipv4(
+ env->dns_base, "example.com", DNS_QUERY_NO_SEARCH,
+ generic_dns_callback, &env->r);
+ return env;
+}
+
+static int
+testleak_cleanup(const struct testcase_t *testcase, void *env_)
+{
+ int ok = 0;
+ struct testleak_env_t *env = env_;
+ tt_assert(env);
+#ifdef EVENT__DISABLE_DEBUG_MODE
+ tt_int_op(allocated_chunks, ==, 0);
+#else
+ libevent_global_shutdown();
+ tt_int_op(allocated_chunks, ==, 0);
+#endif
+ ok = 1;
+end:
+ if (env) {
+ if (env->dns_base)
+ evdns_base_free(env->dns_base, 0);
+ if (env->base)
+ event_base_free(env->base);
+ free(env);
+ }
+ return ok;
+}
+
+static struct testcase_setup_t testleak_funcs = {
+ testleak_setup, testleak_cleanup
+};
+
+static void
+test_dbg_leak_cancel(void *env_)
+{
+ /* cancel, loop, free/dns, free/base */
+ struct testleak_env_t *env = env_;
+ int send_err_shutdown = 1;
+ evdns_cancel_request(env->dns_base, env->req);
+ env->req = 0;
+
+ /* `req` is freed in callback, that's why one loop is required. */
+ event_base_loop(env->base, EVLOOP_NONBLOCK);
+
+	/* send_err_shutdown has no effect here, since our request has
+	 * already been canceled */
+ evdns_base_free(env->dns_base, send_err_shutdown);
+ env->dns_base = 0;
+ event_base_free(env->base);
+ env->base = 0;
+}
+
+static void
+dbg_leak_resume(void *env_, int cancel, int send_err_shutdown)
+{
+ /* cancel, loop, free/dns, free/base */
+ struct testleak_env_t *env = env_;
+ if (cancel) {
+ evdns_cancel_request(env->dns_base, env->req);
+ tt_assert(!evdns_base_resume(env->dns_base));
+ } else {
+		/* TODO: with no nameservers the request can't be processed, so it must fail with an error */
+ tt_assert(!evdns_base_resume(env->dns_base));
+ }
+
+ event_base_loop(env->base, EVLOOP_NONBLOCK);
+	/**
+	 * Because we don't cancel the request, and want our callback to receive
+	 * DNS_ERR_SHUTDOWN, a deferred callback is used, which leaves:
+	 * - one extra malloc(),
+	 *   @see reply_schedule_callback()
+	 * - and one missing free,
+	 *   @see request_finished() (req->handle->pending_cb = 1)
+	 * These don't need to be counted in testleak_cleanup(); they are cleaned
+	 * up by running the loop once more, *after* the evdns base is freed.
+	 */
+ evdns_base_free(env->dns_base, send_err_shutdown);
+ env->dns_base = 0;
+ event_base_loop(env->base, EVLOOP_NONBLOCK);
+
+end:
+ event_base_free(env->base);
+ env->base = 0;
+}
+
+#define IMPL_DBG_LEAK_RESUME(name, cancel, send_err_shutdown) \
+ static void \
+ test_dbg_leak_##name##_(void *env_) \
+ { \
+ dbg_leak_resume(env_, cancel, send_err_shutdown); \
+ }
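+/* Each invocation below generates a thin wrapper; for example
+ * IMPL_DBG_LEAK_RESUME(resume, 0, 0) expands to roughly:
+ *
+ *	static void
+ *	test_dbg_leak_resume_(void *env_)
+ *	{
+ *		dbg_leak_resume(env_, 0, 0);
+ *	}
+ */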
+IMPL_DBG_LEAK_RESUME(resume, 0, 0)
+IMPL_DBG_LEAK_RESUME(cancel_and_resume, 1, 0)
+IMPL_DBG_LEAK_RESUME(resume_send_err, 0, 1)
+IMPL_DBG_LEAK_RESUME(cancel_and_resume_send_err, 1, 1)
+
+static void
+test_dbg_leak_shutdown(void *env_)
+{
+ /* free/dns, loop, free/base */
+ struct testleak_env_t *env = env_;
+ int send_err_shutdown = 1;
+
+	/* `req` is freed both with and without `send_err_shutdown`;
+	 * the only difference is whether `evdns_callback` gets called */
+ env->req = 0;
+
+ evdns_base_free(env->dns_base, send_err_shutdown);
+ env->dns_base = 0;
+
+ /* `req` is freed in callback, that's why one loop is required */
+ event_base_loop(env->base, EVLOOP_NONBLOCK);
+ event_base_free(env->base);
+ env->base = 0;
+}
+#endif
+
+static void
+test_getaddrinfo_async_cancel_stress(void *ptr)
+{
+ struct event_base *base;
+ struct evdns_base *dns_base = NULL;
+ struct evdns_server_port *server = NULL;
+ evutil_socket_t fd = -1;
+ struct sockaddr_in sin;
+ struct sockaddr_storage ss;
+ ev_socklen_t slen;
+ int i;
+
+ base = event_base_new();
+ dns_base = evdns_base_new(base, 0);
+
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = 0;
+ sin.sin_addr.s_addr = htonl(0x7f000001);
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
+ tt_abort_perror("socket");
+ }
+ evutil_make_socket_nonblocking(fd);
+ if (bind(fd, (struct sockaddr*)&sin, sizeof(sin))<0) {
+ tt_abort_perror("bind");
+ }
+ server = evdns_add_server_port_with_base(base, fd, 0, gaic_server_cb,
+ base);
+
+ memset(&ss, 0, sizeof(ss));
+ slen = sizeof(ss);
+ if (getsockname(fd, (struct sockaddr*)&ss, &slen)<0) {
+ tt_abort_perror("getsockname");
+ }
+ evdns_base_nameserver_sockaddr_add(dns_base,
+ (struct sockaddr*)&ss, slen, 0);
+
+ for (i = 0; i < 1000; ++i) {
+ gaic_launch(base, dns_base);
+ }
+
+ event_base_dispatch(base);
+
+end:
+ if (dns_base)
+ evdns_base_free(dns_base, 1);
+ if (server)
+ evdns_close_server_port(server);
+ if (base)
+ event_base_free(base);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
+static void
+dns_client_fail_requests_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_base *dns = NULL;
+ struct evdns_server_port *dns_port = NULL;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+
+ struct generic_dns_callback_result r[20];
+ int i;
+
+ dns_port = regress_get_dnsserver(base, &portnum, NULL,
+ regress_dns_server_cb, reissue_table);
+ tt_assert(dns_port);
+
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+
+ for (i = 0; i < 20; ++i)
+ evdns_base_resolve_ipv4(dns, "foof.example.com", 0, generic_dns_callback, &r[i]);
+
+ n_replies_left = 20;
+ exit_base = base;
+
+ evdns_base_free(dns, 1 /** fail requests */);
+	/** run deferred callbacks, to catch any use-after-free */
+ event_base_dispatch(base);
+
+ tt_int_op(n_replies_left, ==, 0);
+ for (i = 0; i < 20; ++i)
+ tt_int_op(r[i].result, ==, DNS_ERR_SHUTDOWN);
+
+end:
+ evdns_close_server_port(dns_port);
+}
+
+static void
+getaddrinfo_cb(int err, struct evutil_addrinfo *res, void *ptr)
+{
+ generic_dns_callback(err, 0, 0, 0, NULL, ptr);
+}
+static void
+dns_client_fail_requests_getaddrinfo_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evdns_base *dns = NULL;
+ struct evdns_server_port *dns_port = NULL;
+ ev_uint16_t portnum = 0;
+ char buf[64];
+
+ struct generic_dns_callback_result r[20];
+ int i;
+
+ dns_port = regress_get_dnsserver(base, &portnum, NULL,
+ regress_dns_server_cb, reissue_table);
+ tt_assert(dns_port);
+
+ evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", (int)portnum);
+
+ dns = evdns_base_new(base, EVDNS_BASE_DISABLE_WHEN_INACTIVE);
+ tt_assert(!evdns_base_nameserver_ip_add(dns, buf));
+
+ for (i = 0; i < 20; ++i)
+ tt_assert(evdns_getaddrinfo(dns, "foof.example.com", "http", NULL, getaddrinfo_cb, &r[i]));
+
+ n_replies_left = 20;
+ exit_base = base;
+
+ evdns_base_free(dns, 1 /** fail requests */);
+	/** run deferred callbacks, to catch any use-after-free */
+ event_base_dispatch(base);
+
+ tt_int_op(n_replies_left, ==, 0);
+ for (i = 0; i < 20; ++i)
+ tt_int_op(r[i].result, ==, EVUTIL_EAI_FAIL);
+
+end:
+ evdns_close_server_port(dns_port);
+}
+
+
+#define DNS_LEGACY(name, flags) \
+ { #name, run_legacy_test_fn, flags|TT_LEGACY, &legacy_setup, \
+ dns_##name }
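+/* For example, DNS_LEGACY(server, TT_FORK|TT_NEED_BASE) expands to roughly:
+ * { "server", run_legacy_test_fn, TT_FORK|TT_NEED_BASE|TT_LEGACY,
+ *   &legacy_setup, dns_server } */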
+
+struct testcase_t dns_testcases[] = {
+ DNS_LEGACY(server, TT_FORK|TT_NEED_BASE),
+ DNS_LEGACY(gethostbyname, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT),
+ DNS_LEGACY(gethostbyname6, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT),
+ DNS_LEGACY(gethostbyaddr, TT_FORK|TT_NEED_BASE|TT_NEED_DNS|TT_OFF_BY_DEFAULT),
+ { "resolve_reverse", dns_resolve_reverse, TT_FORK|TT_OFF_BY_DEFAULT, NULL, NULL },
+ { "search", dns_search_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "search_lower", dns_search_lower_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "search_cancel", dns_search_cancel_test,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "retry", dns_retry_test, TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL },
+ { "retry_disable_when_inactive", dns_retry_disable_when_inactive_test,
+ TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL },
+ { "reissue", dns_reissue_test, TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL },
+ { "reissue_disable_when_inactive", dns_reissue_disable_when_inactive_test,
+ TT_FORK|TT_NEED_BASE|TT_NO_LOGS, &basic_setup, NULL },
+ { "inflight", dns_inflight_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "bufferevent_connect_hostname", test_bufferevent_connect_hostname,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "disable_when_inactive", dns_disable_when_inactive_test,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "disable_when_inactive_no_ns", dns_disable_when_inactive_no_ns_test,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+
+ { "getaddrinfo_async", test_getaddrinfo_async,
+ TT_FORK|TT_NEED_BASE, &basic_setup, (char*)"" },
+ { "getaddrinfo_cancel_stress", test_getaddrinfo_async_cancel_stress,
+ TT_FORK, NULL, NULL },
+
+#ifdef EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED
+ { "leak_shutdown", test_dbg_leak_shutdown, TT_FORK, &testleak_funcs, NULL },
+ { "leak_cancel", test_dbg_leak_cancel, TT_FORK, &testleak_funcs, NULL },
+
+ { "leak_resume", test_dbg_leak_resume_, TT_FORK, &testleak_funcs, NULL },
+ { "leak_cancel_and_resume", test_dbg_leak_cancel_and_resume_,
+ TT_FORK, &testleak_funcs, NULL },
+ { "leak_resume_send_err", test_dbg_leak_resume_send_err_,
+ TT_FORK, &testleak_funcs, NULL },
+ { "leak_cancel_and_resume_send_err", test_dbg_leak_cancel_and_resume_send_err_,
+ TT_FORK, &testleak_funcs, NULL },
+#endif
+
+ { "client_fail_requests", dns_client_fail_requests_test,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "client_fail_requests_getaddrinfo",
+ dns_client_fail_requests_getaddrinfo_test,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+
+ END_OF_TESTCASES
+};
+
diff --git a/libs/libevent/docs/test/regress_et.c b/libs/libevent/docs/test/regress_et.c
new file mode 100644
index 0000000000..229a78e2d4
--- /dev/null
+++ b/libs/libevent/docs/test/regress_et.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../util-internal.h"
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/util.h"
+
+#include "regress.h"
+
+static int was_et = 0;
+
+static void
+read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ char buf;
+ int len;
+
+ len = recv(fd, &buf, sizeof(buf), 0);
+
+ called++;
+ if (event & EV_ET)
+ was_et = 1;
+
+ if (!len)
+ event_del(arg);
+}
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+#ifdef _WIN32
+#define LOCAL_SOCKETPAIR_AF AF_INET
+#else
+#define LOCAL_SOCKETPAIR_AF AF_UNIX
+#endif
+
+static void
+test_edgetriggered(void *et)
+{
+ struct event *ev = NULL;
+ struct event_base *base = NULL;
+ const char *test = "test string";
+ evutil_socket_t pair[2] = {-1,-1};
+ int supports_et;
+
+ /* On Linux 3.2.1 (at least, as patched by Fedora and tested by Nick),
+ * doing a "recv" on an AF_UNIX socket resets the readability of the
+ * socket, even though there is no state change, so we don't actually
+ * get edge-triggered behavior. Yuck! Linux 3.1.9 didn't have this
+ * problem.
+ */
+#ifdef __linux__
+ if (evutil_ersatz_socketpair_(AF_INET, SOCK_STREAM, 0, pair) == -1) {
+ tt_abort_perror("socketpair");
+ }
+#else
+ if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair) == -1) {
+ tt_abort_perror("socketpair");
+ }
+#endif
+
+ called = was_et = 0;
+
+ tt_int_op(send(pair[0], test, (int)strlen(test)+1, 0), >, 0);
+ shutdown(pair[0], SHUT_WR);
+
+	/* Initialize the event library */
+ base = event_base_new();
+
+ if (!strcmp(event_base_get_method(base), "epoll") ||
+ !strcmp(event_base_get_method(base), "epoll (with changelist)") ||
+ !strcmp(event_base_get_method(base), "kqueue"))
+ supports_et = 1;
+ else
+ supports_et = 0;
+
+ TT_BLATHER(("Checking for edge-triggered events with %s, which should %s"
+ "support edge-triggering", event_base_get_method(base),
+ supports_et?"":"not "));
+
+	/* Initialize one event */
+ ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev);
+
+ event_add(ev, NULL);
+
+ /* We're going to call the dispatch function twice. The first invocation
+ * will read a single byte from pair[1] in either case. If we're edge
+ * triggered, we'll only see the event once (since we only see transitions
+ * from no data to data), so the second invocation of event_base_loop will
+ * do nothing. If we're level triggered, the second invocation of
+ * event_base_loop will also activate the event (because there's still
+ * data to read). */
+ event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);
+ event_base_loop(base,EVLOOP_NONBLOCK|EVLOOP_ONCE);
+
+ if (supports_et) {
+ tt_int_op(called, ==, 1);
+ tt_assert(was_et);
+ } else {
+ tt_int_op(called, ==, 2);
+ tt_assert(!was_et);
+ }
+
+ end:
+ if (ev) {
+ event_del(ev);
+ event_free(ev);
+ }
+ if (base)
+ event_base_free(base);
+ evutil_closesocket(pair[0]);
+ evutil_closesocket(pair[1]);
+}
+
+static void
+test_edgetriggered_mix_error(void *data_)
+{
+ struct basic_test_data *data = data_;
+ struct event_base *base = NULL;
+ struct event *ev_et=NULL, *ev_lt=NULL;
+
+#ifdef EVENT__DISABLE_DEBUG_MODE
+ if (1)
+ tt_skip();
+#endif
+
+ if (!libevent_tests_running_in_debug_mode)
+ event_enable_debug_mode();
+
+ base = event_base_new();
+
+	/* try mixing edge-triggered and level-triggered to make sure it fails */
+ ev_et = event_new(base, data->pair[0], EV_READ|EV_ET, read_cb, ev_et);
+ tt_assert(ev_et);
+ ev_lt = event_new(base, data->pair[0], EV_READ, read_cb, ev_lt);
+ tt_assert(ev_lt);
+
+ /* Add edge-triggered, then level-triggered. Get an error. */
+ tt_int_op(0, ==, event_add(ev_et, NULL));
+ tt_int_op(-1, ==, event_add(ev_lt, NULL));
+ tt_int_op(EV_READ, ==, event_pending(ev_et, EV_READ, NULL));
+ tt_int_op(0, ==, event_pending(ev_lt, EV_READ, NULL));
+
+ tt_int_op(0, ==, event_del(ev_et));
+ /* Add level-triggered, then edge-triggered. Get an error. */
+ tt_int_op(0, ==, event_add(ev_lt, NULL));
+ tt_int_op(-1, ==, event_add(ev_et, NULL));
+ tt_int_op(EV_READ, ==, event_pending(ev_lt, EV_READ, NULL));
+ tt_int_op(0, ==, event_pending(ev_et, EV_READ, NULL));
+
+end:
+ if (ev_et)
+ event_free(ev_et);
+ if (ev_lt)
+ event_free(ev_lt);
+ if (base)
+ event_base_free(base);
+}
+
+struct testcase_t edgetriggered_testcases[] = {
+ { "et", test_edgetriggered, TT_FORK, NULL, NULL },
+ { "et_mix_error", test_edgetriggered_mix_error,
+ TT_FORK|TT_NEED_SOCKETPAIR|TT_NO_LOGS, &basic_setup, NULL },
+ END_OF_TESTCASES
+};
diff --git a/libs/libevent/docs/test/regress_finalize.c b/libs/libevent/docs/test/regress_finalize.c
new file mode 100644
index 0000000000..552210fe9d
--- /dev/null
+++ b/libs/libevent/docs/test/regress_finalize.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2013 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+#include "tinytest.h"
+#include "tinytest_macros.h"
+#include <stdlib.h>
+
+#include "event2/event.h"
+#include "event2/util.h"
+#include "event-internal.h"
+#include "defer-internal.h"
+
+#include "regress.h"
+#include "regress_thread.h"
+
+static void
+timer_callback(evutil_socket_t fd, short what, void *arg)
+{
+ int *int_arg = arg;
+ *int_arg += 1;
+ (void)fd;
+ (void)what;
+}
+static void
+simple_callback(struct event_callback *evcb, void *arg)
+{
+ int *int_arg = arg;
+ *int_arg += 1;
+ (void)evcb;
+}
+static void
+event_finalize_callback_1(struct event *ev, void *arg)
+{
+ int *int_arg = arg;
+ *int_arg += 100;
+ (void)ev;
+}
+static void
+callback_finalize_callback_1(struct event_callback *evcb, void *arg)
+{
+ int *int_arg = arg;
+ *int_arg += 100;
+ (void)evcb;
+}
+
+
+static void
+test_fin_cb_invoked(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+
+ struct event *ev;
+ struct event ev2;
+ struct event_callback evcb;
+ int cb_called = 0;
+ int ev_called = 0;
+
+ const struct timeval ten_sec = {10,0};
+
+ event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);
+ ev = evtimer_new(base, timer_callback, &ev_called);
+ /* Just finalize them; don't bother adding. */
+ event_free_finalize(0, ev, event_finalize_callback_1);
+ event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);
+
+ event_base_dispatch(base);
+
+ tt_int_op(cb_called, ==, 100);
+ tt_int_op(ev_called, ==, 100);
+
+ ev_called = cb_called = 0;
+ event_base_assert_ok_(base);
+
+	/* Now try it when they're active.  (Actually, don't finalize: make
+	 * sure activation can happen!) */
+ ev = evtimer_new(base, timer_callback, &ev_called);
+ event_deferred_cb_init_(&evcb, 0, simple_callback, &cb_called);
+
+ event_active(ev, EV_TIMEOUT, 1);
+ event_callback_activate_(base, &evcb);
+
+ event_base_dispatch(base);
+ tt_int_op(cb_called, ==, 1);
+ tt_int_op(ev_called, ==, 1);
+
+ ev_called = cb_called = 0;
+ event_base_assert_ok_(base);
+
+ /* Great, it worked. Now activate and finalize and make sure only
+ * finalizing happens. */
+ event_active(ev, EV_TIMEOUT, 1);
+ event_callback_activate_(base, &evcb);
+ event_free_finalize(0, ev, event_finalize_callback_1);
+ event_callback_finalize_(base, 0, &evcb, callback_finalize_callback_1);
+
+ event_base_dispatch(base);
+ tt_int_op(cb_called, ==, 100);
+ tt_int_op(ev_called, ==, 100);
+
+ ev_called = 0;
+
+ event_base_assert_ok_(base);
+
+ /* Okay, now add but don't have it become active, and make sure *that*
+ * works. */
+ ev = evtimer_new(base, timer_callback, &ev_called);
+ event_add(ev, &ten_sec);
+ event_free_finalize(0, ev, event_finalize_callback_1);
+
+ event_base_dispatch(base);
+ tt_int_op(ev_called, ==, 100);
+
+ ev_called = 0;
+ event_base_assert_ok_(base);
+
+ /* Now try adding and deleting after finalizing. */
+ ev = evtimer_new(base, timer_callback, &ev_called);
+ evtimer_assign(&ev2, base, timer_callback, &ev_called);
+ event_add(ev, &ten_sec);
+ event_free_finalize(0, ev, event_finalize_callback_1);
+ event_finalize(0, &ev2, event_finalize_callback_1);
+
+ event_add(&ev2, &ten_sec);
+ event_del(ev);
+ event_active(&ev2, EV_TIMEOUT, 1);
+
+ event_base_dispatch(base);
+ tt_int_op(ev_called, ==, 200);
+
+ event_base_assert_ok_(base);
+
+end:
+ ;
+}
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *
+tfff_malloc(size_t n)
+{
+ return malloc(n);
+}
+static void *tfff_p1=NULL, *tfff_p2=NULL;
+static int tfff_p1_freed=0, tfff_p2_freed=0;
+static void
+tfff_free(void *p)
+{
+ if (! p)
+ return;
+ if (p == tfff_p1)
+ ++tfff_p1_freed;
+ if (p == tfff_p2)
+ ++tfff_p2_freed;
+ free(p);
+}
+static void *
+tfff_realloc(void *p, size_t sz)
+{
+ return realloc(p,sz);
+}
+#endif
+
+static void
+test_fin_free_finalize(void *arg)
+{
+#ifdef EVENT__DISABLE_MM_REPLACEMENT
+ tinytest_set_test_skipped_();
+#else
+ struct event_base *base = NULL;
+ struct event *ev, *ev2;
+ int ev_called = 0;
+ int ev2_called = 0;
+
+ (void)arg;
+
+ event_set_mem_functions(tfff_malloc, tfff_realloc, tfff_free);
+
+ base = event_base_new();
+ tt_assert(base);
+
+ ev = evtimer_new(base, timer_callback, &ev_called);
+ ev2 = evtimer_new(base, timer_callback, &ev2_called);
+ tfff_p1 = ev;
+ tfff_p2 = ev2;
+ event_free_finalize(0, ev, event_finalize_callback_1);
+ event_finalize(0, ev2, event_finalize_callback_1);
+
+ event_base_dispatch(base);
+
+ tt_int_op(ev_called, ==, 100);
+ tt_int_op(ev2_called, ==, 100);
+
+ event_base_assert_ok_(base);
+ tt_int_op(tfff_p1_freed, ==, 1);
+ tt_int_op(tfff_p2_freed, ==, 0);
+
+ event_free(ev2);
+
+end:
+ if (base)
+ event_base_free(base);
+#endif
+}
+
+/* For test_fin_within_cb */
+struct event_and_count {
+ struct event *ev;
+ struct event *ev2;
+ int count;
+};
+static void
+event_finalize_callback_2(struct event *ev, void *arg)
+{
+ struct event_and_count *evc = arg;
+ evc->count += 100;
+ event_free(ev);
+}
+static void
+timer_callback_2(evutil_socket_t fd, short what, void *arg)
+{
+ struct event_and_count *evc = arg;
+ event_finalize(0, evc->ev, event_finalize_callback_2);
+ event_finalize(0, evc->ev2, event_finalize_callback_2);
+ ++ evc->count;
+ (void)fd;
+ (void)what;
+}
+
+static void
+test_fin_within_cb(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+
+ struct event_and_count evc1, evc2;
+ evc1.count = evc2.count = 0;
+ evc2.ev2 = evc1.ev = evtimer_new(base, timer_callback_2, &evc1);
+ evc1.ev2 = evc2.ev = evtimer_new(base, timer_callback_2, &evc2);
+
+ /* Activate both. The first one will have its callback run, which
+ * will finalize both of them, preventing the second one's callback
+ * from running. */
+ event_active(evc1.ev, EV_TIMEOUT, 1);
+ event_active(evc2.ev, EV_TIMEOUT, 1);
+
+ event_base_dispatch(base);
+ tt_int_op(evc1.count, ==, 101);
+ tt_int_op(evc2.count, ==, 100);
+
+ event_base_assert_ok_(base);
+ /* Now try with EV_PERSIST events. */
+ evc1.count = evc2.count = 0;
+ evc2.ev2 = evc1.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc1);
+ evc1.ev2 = evc2.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc2);
+
+ event_active(evc1.ev, EV_TIMEOUT, 1);
+ event_active(evc2.ev, EV_TIMEOUT, 1);
+
+ event_base_dispatch(base);
+ tt_int_op(evc1.count, ==, 101);
+ tt_int_op(evc2.count, ==, 100);
+
+ event_base_assert_ok_(base);
+end:
+ ;
+}
+
+#if 0
+static void
+timer_callback_3(evutil_socket_t *fd, short what, void *arg)
+{
+ (void)fd;
+ (void)what;
+
+}
+static void
+test_fin_many(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+
+ struct event *ev1, *ev2;
+ struct event_callback evcb1, evcb2;
+ int ev1_count = 0, ev2_count = 0;
+ int evcb1_count = 0, evcb2_count = 0;
+ struct event_callback *array[4];
+
+ int n;
+
+ /* First attempt: call finalize_many with no events running */
+ ev1 = evtimer_new(base, timer_callback, &ev1_count);
+	ev2 = evtimer_new(base, timer_callback, &ev2_count);
+	event_deferred_cb_init_(&evcb1, 0, simple_callback, &evcb1_count);
+	event_deferred_cb_init_(&evcb2, 0, simple_callback, &evcb2_count);
+ array[0] = &ev1->ev_evcallback;
+ array[1] = &ev2->ev_evcallback;
+ array[2] = &evcb1;
+ array[3] = &evcb2;
+
+
+
+ n = event_callback_finalize_many(base, 4, array,
+ callback_finalize_callback_1);
+
+}
+#endif
+
+
+#define TEST(name, flags) \
+ { #name, test_fin_##name, (flags), &basic_setup, NULL }
+
+struct testcase_t finalize_testcases[] = {
+
+ TEST(cb_invoked, TT_FORK|TT_NEED_BASE),
+ TEST(free_finalize, TT_FORK),
+ TEST(within_cb, TT_FORK|TT_NEED_BASE),
+// TEST(many, TT_FORK|TT_NEED_BASE),
+
+
+ END_OF_TESTCASES
+};
+
diff --git a/libs/libevent/docs/test/regress_http.c b/libs/libevent/docs/test/regress_http.c
new file mode 100644
index 0000000000..cbe7aea34c
--- /dev/null
+++ b/libs/libevent/docs/test/regress_http.c
@@ -0,0 +1,4335 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/dns.h"
+
+#include "event2/event.h"
+#include "event2/http.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_ssl.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "log-internal.h"
+#include "http-internal.h"
+#include "regress.h"
+#include "regress_testutils.h"
+
+static struct evhttp *http;
+/* set if a test needs to call loopexit on a base */
+static struct event_base *exit_base;
+
+static char const BASIC_REQUEST_BODY[] = "This is funny";
+
+#define IMPL_HTTP_REQUEST_ERROR_CB(name, expecting_error) \
+ static void \
+ http_request_error_cb_with_##name##_(enum evhttp_request_error error, \
+ void *arg) \
+ { \
+ if (error != expecting_error) { \
+ fprintf(stderr, "FAILED\n"); \
+ exit(1); \
+ } \
+ test_ok = 1; \
+ }
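+/* The invocation below defines http_request_error_cb_with_cancel_(), an
+ * error callback that treats anything other than EVREQ_HTTP_REQUEST_CANCEL
+ * as a fatal test failure. */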
+IMPL_HTTP_REQUEST_ERROR_CB(cancel, EVREQ_HTTP_REQUEST_CANCEL)
+
+static void http_basic_cb(struct evhttp_request *req, void *arg);
+static void http_large_cb(struct evhttp_request *req, void *arg);
+static void http_chunked_cb(struct evhttp_request *req, void *arg);
+static void http_post_cb(struct evhttp_request *req, void *arg);
+static void http_put_cb(struct evhttp_request *req, void *arg);
+static void http_delete_cb(struct evhttp_request *req, void *arg);
+static void http_delay_cb(struct evhttp_request *req, void *arg);
+static void http_large_delay_cb(struct evhttp_request *req, void *arg);
+static void http_badreq_cb(struct evhttp_request *req, void *arg);
+static void http_dispatcher_cb(struct evhttp_request *req, void *arg);
+static void http_on_complete_cb(struct evhttp_request *req, void *arg);
+
+#define HTTP_BIND_IPV6 1
+#define HTTP_BIND_SSL 2
+static int
+http_bind(struct evhttp *myhttp, ev_uint16_t *pport, int mask)
+{
+ int port;
+ struct evhttp_bound_socket *sock;
+ int ipv6 = mask & HTTP_BIND_IPV6;
+
+ if (ipv6)
+ sock = evhttp_bind_socket_with_handle(myhttp, "::1", *pport);
+ else
+ sock = evhttp_bind_socket_with_handle(myhttp, "127.0.0.1", *pport);
+
+ if (sock == NULL) {
+ if (ipv6)
+ return -1;
+ else
+ event_errx(1, "Could not start web server");
+ }
+
+ port = regress_get_socket_port(evhttp_bound_socket_get_fd(sock));
+ if (port < 0)
+ return -1;
+ *pport = (ev_uint16_t) port;
+
+ return 0;
+}
+
+#ifdef EVENT__HAVE_OPENSSL
+static struct bufferevent *
+https_bev(struct event_base *base, void *arg)
+{
+ SSL *ssl = SSL_new(get_ssl_ctx());
+
+ SSL_use_certificate(ssl, ssl_getcert());
+ SSL_use_PrivateKey(ssl, ssl_getkey());
+
+ return bufferevent_openssl_socket_new(
+ base, -1, ssl, BUFFEREVENT_SSL_ACCEPTING,
+ BEV_OPT_CLOSE_ON_FREE);
+}
+#endif
+static struct evhttp *
+http_setup(ev_uint16_t *pport, struct event_base *base, int mask)
+{
+ struct evhttp *myhttp;
+
+	/* Create the server; http_bind() below chooses the actual port */
+ myhttp = evhttp_new(base);
+
+ if (http_bind(myhttp, pport, mask) < 0)
+ return NULL;
+#ifdef EVENT__HAVE_OPENSSL
+ if (mask & HTTP_BIND_SSL) {
+ init_ssl();
+ evhttp_set_bevcb(myhttp, https_bev, NULL);
+ }
+#endif
+
+ /* Register a callback for certain types of requests */
+ evhttp_set_cb(myhttp, "/test", http_basic_cb, base);
+ evhttp_set_cb(myhttp, "/large", http_large_cb, base);
+ evhttp_set_cb(myhttp, "/chunked", http_chunked_cb, base);
+ evhttp_set_cb(myhttp, "/streamed", http_chunked_cb, base);
+ evhttp_set_cb(myhttp, "/postit", http_post_cb, base);
+ evhttp_set_cb(myhttp, "/putit", http_put_cb, base);
+ evhttp_set_cb(myhttp, "/deleteit", http_delete_cb, base);
+ evhttp_set_cb(myhttp, "/delay", http_delay_cb, base);
+ evhttp_set_cb(myhttp, "/largedelay", http_large_delay_cb, base);
+ evhttp_set_cb(myhttp, "/badrequest", http_badreq_cb, base);
+ evhttp_set_cb(myhttp, "/oncomplete", http_on_complete_cb, base);
+ evhttp_set_cb(myhttp, "/", http_dispatcher_cb, base);
+ return (myhttp);
+}
+
+#ifndef NI_MAXSERV
+#define NI_MAXSERV 1024
+#endif
+
+static evutil_socket_t
+http_connect(const char *address, unsigned short port)
+{
+ /* Stupid code for connecting */
+ struct evutil_addrinfo ai, *aitop;
+ char strport[NI_MAXSERV];
+
+ struct sockaddr *sa;
+ int slen;
+ evutil_socket_t fd;
+
+ memset(&ai, 0, sizeof(ai));
+ ai.ai_family = AF_INET;
+ ai.ai_socktype = SOCK_STREAM;
+ evutil_snprintf(strport, sizeof(strport), "%d", port);
+ if (evutil_getaddrinfo(address, strport, &ai, &aitop) != 0) {
+ event_warn("getaddrinfo");
+ return (-1);
+ }
+ sa = aitop->ai_addr;
+ slen = aitop->ai_addrlen;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (fd == -1)
+ event_err(1, "socket failed");
+
+ evutil_make_socket_nonblocking(fd);
+ if (connect(fd, sa, slen) == -1) {
+#ifdef _WIN32
+ int tmp_err = WSAGetLastError();
+ if (tmp_err != WSAEINPROGRESS && tmp_err != WSAEINVAL &&
+ tmp_err != WSAEWOULDBLOCK)
+ event_err(1, "connect failed");
+#else
+ if (errno != EINPROGRESS)
+ event_err(1, "connect failed");
+#endif
+ }
+
+ evutil_freeaddrinfo(aitop);
+
+ return (fd);
+}
+
+/* Helper: do a strcmp on the contents of buf and the string s. */
+static int
+evbuffer_datacmp(struct evbuffer *buf, const char *s)
+{
+ size_t b_sz = evbuffer_get_length(buf);
+ size_t s_sz = strlen(s);
+ unsigned char *d;
+ int r;
+
+ if (b_sz < s_sz)
+ return -1;
+
+ d = evbuffer_pullup(buf, s_sz);
+ if ((r = memcmp(d, s, s_sz)))
+ return r;
+
+ if (b_sz > s_sz)
+ return 1;
+ else
+ return 0;
+}
+
+/* Helper: Return true iff buf contains s */
+static int
+evbuffer_contains(struct evbuffer *buf, const char *s)
+{
+ struct evbuffer_ptr ptr;
+ ptr = evbuffer_search(buf, s, strlen(s), NULL);
+ return ptr.pos != -1;
+}
+
+static void
+http_readcb(struct bufferevent *bev, void *arg)
+{
+ const char *what = BASIC_REQUEST_BODY;
+ struct event_base *my_base = arg;
+
+ if (evbuffer_contains(bufferevent_get_input(bev), what)) {
+ struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+ enum message_read_status done;
+
+ /* req->kind = EVHTTP_RESPONSE; */
+ done = evhttp_parse_firstline_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ done = evhttp_parse_headers_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ if (done == 1 &&
+ evhttp_find_header(evhttp_request_get_input_headers(req),
+ "Content-Type") != NULL)
+ test_ok++;
+
+ out:
+ evhttp_request_free(req);
+ bufferevent_disable(bev, EV_READ);
+ if (exit_base)
+ event_base_loopexit(exit_base, NULL);
+ else if (my_base)
+ event_base_loopexit(my_base, NULL);
+ else {
+ fprintf(stderr, "No way to exit loop!\n");
+ exit(1);
+ }
+ }
+}
+
+static void
+http_writecb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+ /* enable reading of the reply */
+ bufferevent_enable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+static void
+http_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ /** For ssl */
+ if (what & BEV_EVENT_CONNECTED)
+ return;
+ test_ok = -2;
+ event_base_loopexit(arg, NULL);
+}
+
+static int found_multi = 0;
+static int found_multi2 = 0;
+
+static void
+http_basic_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+ struct evhttp_connection *evcon;
+ int empty = evhttp_find_header(evhttp_request_get_input_headers(req), "Empty") != NULL;
+ event_debug(("%s: called\n", __func__));
+ evbuffer_add_printf(evb, BASIC_REQUEST_BODY);
+
+ evcon = evhttp_request_get_connection(req);
+ tt_assert(evhttp_connection_get_server(evcon) == http);
+
+ /* For multi-line headers test */
+ {
+ const char *multi =
+ evhttp_find_header(evhttp_request_get_input_headers(req),"X-Multi");
+ if (multi) {
+ found_multi = !strcmp(multi,"aaaaaaaa a END");
+ if (strcmp("END", multi + strlen(multi) - 3) == 0)
+ test_ok++;
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "X-Last"))
+ test_ok++;
+ }
+ }
+ {
+ const char *multi2 =
+ evhttp_find_header(evhttp_request_get_input_headers(req),"X-Multi-Extra-WS");
+ if (multi2) {
+ found_multi2 = !strcmp(multi2,"libevent 2.1");
+ }
+ }
+
+
+ /* injecting a bad content-length */
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "X-Negative"))
+ evhttp_add_header(evhttp_request_get_output_headers(req),
+ "Content-Length", "-100");
+
+ /* allow sending of an empty reply */
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine",
+ !empty ? evb : NULL);
+
+end:
+ evbuffer_free(evb);
+}
+
+static void
+http_large_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+ int i;
+
+ for (i = 0; i < 1<<20; ++i) {
+ evbuffer_add_printf(evb, BASIC_REQUEST_BODY);
+ }
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+ evbuffer_free(evb);
+}
+
+static char const* const CHUNKS[] = {
+ "This is funny",
+ "but not hilarious.",
+ "bwv 1052"
+};
+
+struct chunk_req_state {
+ struct event_base *base;
+ struct evhttp_request *req;
+ int i;
+};
+
+static void
+http_chunked_trickle_cb(evutil_socket_t fd, short events, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+ struct chunk_req_state *state = arg;
+ struct timeval when = { 0, 0 };
+
+ evbuffer_add_printf(evb, "%s", CHUNKS[state->i]);
+ evhttp_send_reply_chunk(state->req, evb);
+ evbuffer_free(evb);
+
+ if (++state->i < (int) (sizeof(CHUNKS)/sizeof(CHUNKS[0]))) {
+ event_base_once(state->base, -1, EV_TIMEOUT,
+ http_chunked_trickle_cb, state, &when);
+ } else {
+ evhttp_send_reply_end(state->req);
+ free(state);
+ }
+}
+
+static void
+http_chunked_cb(struct evhttp_request *req, void *arg)
+{
+ struct timeval when = { 0, 0 };
+ struct chunk_req_state *state = malloc(sizeof(struct chunk_req_state));
+ event_debug(("%s: called\n", __func__));
+
+ memset(state, 0, sizeof(struct chunk_req_state));
+ state->req = req;
+ state->base = arg;
+
+ if (strcmp(evhttp_request_get_uri(req), "/streamed") == 0) {
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Content-Length", "39");
+ }
+
+ /* generate a chunked/streamed reply */
+ evhttp_send_reply_start(req, HTTP_OK, "Everything is fine");
+
+ /* but trickle it across several iterations to ensure we're not
+ * assuming it comes all at once */
+ event_base_once(arg, -1, EV_TIMEOUT, http_chunked_trickle_cb, state, &when);
+}
+
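+/* Second half of the split request: completes the "Host: some" line written
+ * by the test with "host\r\n" and then finishes the headers. */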
+static void
+http_complete_write(evutil_socket_t fd, short what, void *arg)
+{
+ struct bufferevent *bev = arg;
+ const char *http_request = "host\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+ bufferevent_write(bev, http_request, strlen(http_request));
+}
+
+static struct bufferevent *
+create_bev(struct event_base *base, int fd, int ssl)
+{
+ int flags = BEV_OPT_DEFER_CALLBACKS;
+ struct bufferevent *bev = NULL;
+
+ if (!ssl) {
+ bev = bufferevent_socket_new(base, fd, flags);
+ } else {
+#ifdef EVENT__HAVE_OPENSSL
+ SSL *ssl = SSL_new(get_ssl_ctx());
+ bev = bufferevent_openssl_socket_new(
+ base, fd, ssl, BUFFEREVENT_SSL_CONNECTING, flags);
+ bufferevent_openssl_set_allow_dirty_shutdown(bev, 1);
+#endif
+ }
+
+ return bev;
+}
+
+static void
+http_basic_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ struct timeval tv;
+ struct bufferevent *bev = NULL;
+ evutil_socket_t fd;
+ const char *http_request;
+ ev_uint16_t port = 0, port2 = 0;
+ int server_flags = ssl ? HTTP_BIND_SSL : 0;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, server_flags);
+
+ /* bind to a second socket */
+ if (http_bind(http, &port2, server_flags) == -1) {
+ fprintf(stdout, "FAILED (bind)\n");
+ exit(1);
+ }
+
+ fd = http_connect("127.0.0.1", port);
+
+ /* Stupid thing to send a request */
+ bev = create_bev(data->base, fd, ssl);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ /* first half of the http request */
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: some";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+ evutil_timerclear(&tv);
+ tv.tv_usec = 10000;
+ event_base_once(data->base,
+ -1, EV_TIMEOUT, http_complete_write, bev, &tv);
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 3);
+
+ /* connect to the second port */
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+
+ fd = http_connect("127.0.0.1", port2);
+
+ /* Stupid thing to send a request */
+ bev = create_bev(data->base, fd, ssl);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 5);
+
+ /* Connect to the second port again. This time, send an absolute uri. */
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+
+ fd = http_connect("127.0.0.1", port2);
+
+ /* Stupid thing to send a request */
+ bev = create_bev(data->base, fd, ssl);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_request =
+ "GET http://somehost.net/test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 7);
+
+ evhttp_free(http);
+ end:
+ if (bev)
+ bufferevent_free(bev);
+}
+static void http_basic_test(void *arg)
+{ return http_basic_test_impl(arg, 0); }
+
+
+static void
+http_delay_reply(evutil_socket_t fd, short what, void *arg)
+{
+ struct evhttp_request *req = arg;
+
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", NULL);
+
+ ++test_ok;
+}
+
+static void
+http_delay_cb(struct evhttp_request *req, void *arg)
+{
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = 0;
+ tv.tv_usec = 200 * 1000;
+
+ event_base_once(arg, -1, EV_TIMEOUT, http_delay_reply, req, &tv);
+}
+
+static void
+http_badreq_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *buf = evbuffer_new();
+
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Content-Type", "text/xml; charset=UTF-8");
+ evbuffer_add_printf(buf, "Hello, %s!", "127.0.0.1");
+
+ evhttp_send_reply(req, HTTP_OK, "OK", buf);
+ evbuffer_free(buf);
+}
+
+static void
+http_badreq_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ event_debug(("%s: called (what=%04x, arg=%p)", __func__, what, arg));
+ /* ignore */
+}
+
+#ifndef SHUT_WR
+#ifdef _WIN32
+#define SHUT_WR SD_SEND
+#else
+#define SHUT_WR 1
+#endif
+#endif
+
+static void
+http_badreq_readcb(struct bufferevent *bev, void *arg)
+{
+ const char *what = "Hello, 127.0.0.1";
+ const char *bad_request = "400 Bad Request";
+
+ if (evbuffer_contains(bufferevent_get_input(bev), bad_request)) {
+ TT_FAIL(("%s:bad request detected", __func__));
+ bufferevent_disable(bev, EV_READ);
+ event_base_loopexit(arg, NULL);
+ return;
+ }
+
+ if (evbuffer_contains(bufferevent_get_input(bev), what)) {
+ struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+ enum message_read_status done;
+
+ /* req->kind = EVHTTP_RESPONSE; */
+ done = evhttp_parse_firstline_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ done = evhttp_parse_headers_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ if (done == 1 &&
+ evhttp_find_header(evhttp_request_get_input_headers(req),
+ "Content-Type") != NULL)
+ test_ok++;
+
+ out:
+ evhttp_request_free(req);
+ evbuffer_drain(bufferevent_get_input(bev), evbuffer_get_length(bufferevent_get_input(bev)));
+ }
+
+ shutdown(bufferevent_getfd(bev), SHUT_WR);
+}
+
+static void
+http_badreq_successcb(evutil_socket_t fd, short what, void *arg)
+{
+ event_debug(("%s: called (what=%04x, arg=%p)", __func__, what, arg));
+ event_base_loopexit(exit_base, NULL);
+}
+
+static void
+http_bad_request_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct timeval tv;
+ struct bufferevent *bev = NULL;
+ evutil_socket_t fd = -1;
+ const char *http_request;
+ ev_uint16_t port=0, port2=0;
+
+ test_ok = 0;
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, 0);
+
+ /* bind to a second socket */
+ if (http_bind(http, &port2, 0) == -1)
+ TT_DIE(("Bind socket failed"));
+
+ /* NULL request test */
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_badreq_readcb, http_writecb,
+ http_badreq_errorcb, data->base);
+ bufferevent_enable(bev, EV_READ);
+
+ /* real NULL request */
+ http_request = "";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ shutdown(fd, SHUT_WR);
+ timerclear(&tv);
+ tv.tv_usec = 10000;
+ event_base_once(data->base, -1, EV_TIMEOUT, http_badreq_successcb, bev, &tv);
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+
+ if (test_ok != 0) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* Second answer (BAD REQUEST) on connection close */
+
+ /* connect to the second port */
+ fd = http_connect("127.0.0.1", port2);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_badreq_readcb, http_writecb,
+ http_badreq_errorcb, data->base);
+ bufferevent_enable(bev, EV_READ);
+
+ /* first half of the http request */
+ http_request =
+ "GET /badrequest HTTP/1.0\r\n" \
+ "Connection: Keep-Alive\r\n" \
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ timerclear(&tv);
+ tv.tv_usec = 10000;
+ event_base_once(data->base, -1, EV_TIMEOUT, http_badreq_successcb, bev, &tv);
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 2);
+
+end:
+ evhttp_free(http);
+ if (bev)
+ bufferevent_free(bev);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
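+/*
+ * delayed_client is set by http_close_detection_() below;
+ * http_large_delay_cb schedules a delayed reply and then deliberately
+ * forces an EOF error on that client connection via
+ * evhttp_connection_fail_().
+ */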
+static struct evhttp_connection *delayed_client;
+
+static void
+http_large_delay_cb(struct evhttp_request *req, void *arg)
+{
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_usec = 500000;
+
+ event_base_once(arg, -1, EV_TIMEOUT, http_delay_reply, req, &tv);
+ evhttp_connection_fail_(delayed_client, EVREQ_HTTP_EOF);
+}
+
+/*
+ * HTTP DELETE test, just piggyback on the basic test
+ */
+
+static void
+http_delete_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+ int empty = evhttp_find_header(evhttp_request_get_input_headers(req), "Empty") != NULL;
+
+ /* Expecting a DELETE request */
+ if (evhttp_request_get_command(req) != EVHTTP_REQ_DELETE) {
+ fprintf(stdout, "FAILED (delete type)\n");
+ exit(1);
+ }
+
+ event_debug(("%s: called\n", __func__));
+ evbuffer_add_printf(evb, BASIC_REQUEST_BODY);
+
+ /* allow sending of an empty reply */
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine",
+ !empty ? evb : NULL);
+
+ evbuffer_free(evb);
+}
+
+static void
+http_delete_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev;
+ evutil_socket_t fd = -1;
+ const char *http_request;
+ ev_uint16_t port = 0;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ tt_assert(http);
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_request =
+ "DELETE /deleteit HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+ fd = -1;
+
+ evhttp_free(http);
+
+ tt_int_op(test_ok, ==, 2);
+ end:
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
+static void
+http_sent_cb(struct evhttp_request *req, void *arg)
+{
+ ev_uintptr_t val = (ev_uintptr_t)arg;
+ struct evbuffer *b;
+
+ if (val != 0xDEADBEEF) {
+ fprintf(stdout, "FAILED on_complete_cb argument\n");
+ exit(1);
+ }
+
+ b = evhttp_request_get_output_buffer(req);
+ if (evbuffer_get_length(b) != 0) {
+ fprintf(stdout, "FAILED on_complete_cb output buffer not written\n");
+ exit(1);
+ }
+
+ event_debug(("%s: called\n", __func__));
+
+ ++test_ok;
+}
+
+static void
+http_on_complete_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb = evbuffer_new();
+
+ evhttp_request_set_on_complete_cb(req, http_sent_cb, (void *)0xDEADBEEF);
+
+ event_debug(("%s: called\n", __func__));
+ evbuffer_add_printf(evb, BASIC_REQUEST_BODY);
+
+ /* allow sending of an empty reply */
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+ evbuffer_free(evb);
+
+ ++test_ok;
+}
+
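+/*
+ * The on-complete test: the server callback registers http_sent_cb with
+ * evhttp_request_set_on_complete_cb() before replying.  http_sent_cb then
+ * checks that it receives the same opaque argument (0xDEADBEEF) and that
+ * the output buffer has been fully flushed by the time it runs.
+ */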
+static void
+http_on_complete_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev;
+ evutil_socket_t fd = -1;
+ const char *http_request;
+ ev_uint16_t port = 0;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_request =
+ "GET /oncomplete HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev);
+
+ evhttp_free(http);
+
+ tt_int_op(test_ok, ==, 4);
+ end:
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
+static void
+http_allowed_methods_eventcb(struct bufferevent *bev, short what, void *arg)
+{
+ char **output = arg;
+ if ((what & (BEV_EVENT_ERROR|BEV_EVENT_EOF))) {
+ char buf[4096];
+ int n;
+ n = evbuffer_remove(bufferevent_get_input(bev), buf,
+ sizeof(buf)-1);
+ if (n >= 0) {
+ buf[n]='\0';
+ if (*output)
+ free(*output);
+ *output = strdup(buf);
+ }
+ event_base_loopexit(exit_base, NULL);
+ }
+}
+
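+/*
+ * evhttp_set_allowed_methods() restricts which methods the server accepts.
+ * Below, only PATCH is allowed: a plain GET and an unknown "FLOOP" method
+ * are both expected to be answered with an "HTTP/1.1 501" status, while
+ * PATCH gets a normal "HTTP/1.1 200".
+ */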
+static void
+http_allowed_methods_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev1, *bev2, *bev3;
+ evutil_socket_t fd1=-1, fd2=-1, fd3=-1;
+ const char *http_request;
+ char *result1=NULL, *result2=NULL, *result3=NULL;
+ ev_uint16_t port = 0;
+
+ exit_base = data->base;
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ fd1 = http_connect("127.0.0.1", port);
+ tt_int_op(fd1, >=, 0);
+
+ /* GET is out; PATCH is in. */
+ evhttp_set_allowed_methods(http, EVHTTP_REQ_PATCH);
+
+ /* Stupid thing to send a request */
+ bev1 = bufferevent_socket_new(data->base, fd1, 0);
+ bufferevent_enable(bev1, EV_READ|EV_WRITE);
+ bufferevent_setcb(bev1, NULL, NULL,
+ http_allowed_methods_eventcb, &result1);
+
+ http_request =
+ "GET /index.html HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev1, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ fd2 = http_connect("127.0.0.1", port);
+ tt_int_op(fd2, >=, 0);
+
+ bev2 = bufferevent_socket_new(data->base, fd2, 0);
+ bufferevent_enable(bev2, EV_READ|EV_WRITE);
+ bufferevent_setcb(bev2, NULL, NULL,
+ http_allowed_methods_eventcb, &result2);
+
+ http_request =
+ "PATCH /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev2, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ fd3 = http_connect("127.0.0.1", port);
+ tt_int_op(fd3, >=, 0);
+
+ bev3 = bufferevent_socket_new(data->base, fd3, 0);
+ bufferevent_enable(bev3, EV_READ|EV_WRITE);
+ bufferevent_setcb(bev3, NULL, NULL,
+ http_allowed_methods_eventcb, &result3);
+
+ http_request =
+ "FLOOP /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev3, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev1);
+ bufferevent_free(bev2);
+ bufferevent_free(bev3);
+
+ evhttp_free(http);
+
+ /* Method known but disallowed */
+ tt_assert(result1);
+ tt_assert(!strncmp(result1, "HTTP/1.1 501 ", strlen("HTTP/1.1 501 ")));
+
+ /* Method known and allowed */
+ tt_assert(result2);
+ tt_assert(!strncmp(result2, "HTTP/1.1 200 ", strlen("HTTP/1.1 200 ")));
+
+ /* Method unknown */
+ tt_assert(result3);
+ tt_assert(!strncmp(result3, "HTTP/1.1 501 ", strlen("HTTP/1.1 501 ")));
+
+ end:
+ if (result1)
+ free(result1);
+ if (result2)
+ free(result2);
+ if (result3)
+ free(result3);
+ if (fd1 >= 0)
+ evutil_closesocket(fd1);
+ if (fd2 >= 0)
+ evutil_closesocket(fd2);
+ if (fd3 >= 0)
+ evutil_closesocket(fd3);
+}
+
+static void http_request_no_action_done(struct evhttp_request *, void *);
+static void http_request_done(struct evhttp_request *, void *);
+static void http_request_empty_done(struct evhttp_request *, void *);
+
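+/*
+ * Shared helper for the connection tests: it creates an evhttp_connection
+ * with evhttp_connection_base_new() and sends three GET requests over it.
+ * When `persistent' is zero, the second request carries "Connection:
+ * close", so the third request should end up re-establishing the
+ * connection.
+ */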
+static void
+http_connection_test_(struct basic_test_data *data, int persistent,
+ const char *address, struct evdns_base *dnsbase, int ipv6, int family)
+{
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, ipv6);
+ if (!http && ipv6) {
+ tt_skip();
+ }
+ tt_assert(http);
+
+ evcon = evhttp_connection_base_new(data->base, dnsbase, address, port);
+ tt_assert(evcon);
+ evhttp_connection_set_family(evcon, family);
+
+ tt_assert(evhttp_connection_get_base(evcon) == data->base);
+
+ exit_base = data->base;
+
+ tt_assert(evhttp_connection_get_server(evcon) == NULL);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok);
+
+ /* try to make another request over the same connection */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	/*
+	 * If our connections are not supposed to be persistent, request
+	 * a close from the server.
+	 */
+ if (!persistent)
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Connection", "close");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ /* make another request: request empty reply */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_empty_done, data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_connection_test(void *arg)
+{
+ http_connection_test_(arg, 0, "127.0.0.1", NULL, 0, AF_UNSPEC);
+}
+static void
+http_persist_connection_test(void *arg)
+{
+ http_connection_test_(arg, 1, "127.0.0.1", NULL, 0, AF_UNSPEC);
+}
+
+static struct regress_dns_server_table search_table[] = {
+ { "localhost", "A", "127.0.0.1", 0, 0 },
+ { NULL, NULL, NULL, 0, 0 }
+};
+
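+/*
+ * Same flow as http_connection_test_(), but here the evhttp_connection is
+ * given an evdns_base pointing at the local regression DNS server, so
+ * name resolution and connecting go through the asynchronous evdns path.
+ */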
+static void
+http_connection_async_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct evdns_base *dns_base = NULL;
+ ev_uint16_t portnum = 0;
+ char address[64];
+
+ exit_base = data->base;
+ tt_assert(regress_dnsserver(data->base, &portnum, search_table));
+
+	dns_base = evdns_base_new(data->base, 0 /* don't load nameservers from resolv.conf */);
+ tt_assert(dns_base);
+
+	/* Add ourselves as the only nameserver, and make sure we really are
+	 * the only nameserver. */
+ evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum);
+ evdns_base_nameserver_ip_add(dns_base, address);
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, dns_base, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok);
+
+ /* try to make another request over the same connection */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+	/*
+	 * Connections in this test are not meant to be persistent, so
+	 * request a close from the server.
+	 */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Connection", "close");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ /* make another request: request empty reply */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_empty_done, data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+ if (dns_base)
+ evdns_base_free(dns_base, 0);
+ regress_clean_dnsserver();
+}
+
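+/*
+ * evhttp_connection_free_on_completion() hands ownership of the
+ * connection to libevent: once the two queued requests have completed,
+ * the connection frees itself, which the test verifies by checking that
+ * the server no longer tracks any connection after the dispatch loop
+ * exits.
+ */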
+static void
+http_autofree_connection_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req[2] = { NULL };
+
+ test_ok = 0;
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule two requests to the HTTP
+	 * server using our make request method.
+	 */
+ req[0] = evhttp_request_new(http_request_empty_done, data->base);
+ req[1] = evhttp_request_new(http_request_empty_done, data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Host", "somehost");
+ evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Connection", "close");
+ evhttp_add_header(evhttp_request_get_output_headers(req[0]), "Empty", "itis");
+ evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Host", "somehost");
+ evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Connection", "close");
+ evhttp_add_header(evhttp_request_get_output_headers(req[1]), "Empty", "itis");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req[0], EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("couldn't make request");
+ }
+ if (evhttp_make_request(evcon, req[1], EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("couldn't make request");
+ }
+
+	/*
+	 * Tell libevent to free the connection when the requests complete.
+	 * We then set the evcon pointer to NULL since we don't want to free
+	 * it again when this function ends.
+	 */
+ evhttp_connection_free_on_completion(evcon);
+ evcon = NULL;
+
+ event_base_dispatch(data->base);
+
+ /* at this point, the http server should have no connection */
+ tt_assert(TAILQ_FIRST(&http->connections) == NULL);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_request_never_call(struct evhttp_request *req, void *arg)
+{
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+}
+
+static void
+http_do_cancel(evutil_socket_t fd, short what, void *arg)
+{
+ struct evhttp_request *req = arg;
+ struct timeval tv;
+ struct event_base *base;
+ evutil_timerclear(&tv);
+ tv.tv_sec = 0;
+ tv.tv_usec = 500 * 1000;
+
+ base = evhttp_connection_get_base(evhttp_request_get_connection(req));
+ evhttp_cancel_request(req);
+
+ event_base_loopexit(base, &tv);
+
+ ++test_ok;
+}
+
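+/*
+ * Cancellation test: a request for /delay is cancelled from a timer
+ * (http_do_cancel) before the server replies, so its normal callback
+ * (http_request_never_call) must never fire; only the error callback set
+ * with evhttp_request_set_error_cb() is expected to run.  Afterwards the
+ * same connection is reused for two ordinary requests.
+ */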
+static void
+http_cancel_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct timeval tv;
+
+ exit_base = data->base;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_never_call, NULL);
+ evhttp_request_set_error_cb(req, http_request_error_cb_with_cancel_);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/delay"),
+ !=, -1);
+
+ evutil_timerclear(&tv);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+
+ event_base_once(data->base, -1, EV_TIMEOUT, http_do_cancel, req, &tv);
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 3);
+
+ /* try to make another request over the same connection */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test"),
+ !=, -1);
+
+ event_base_dispatch(data->base);
+
+ /* make another request: request empty reply */
+ test_ok = 0;
+
+ req = evhttp_request_new(http_request_empty_done, data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Empty", "itis");
+
+ /* We give ownership of the request to the connection */
+ tt_int_op(evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test"),
+ !=, -1);
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_request_no_action_done(struct evhttp_request *req, void *arg)
+{
+ EVUTIL_ASSERT(exit_base);
+ event_base_loopexit(exit_base, NULL);
+}
+
+static void
+http_request_done(struct evhttp_request *req, void *arg)
+{
+ const char *what = arg;
+
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ EVUTIL_ASSERT(exit_base);
+ event_base_loopexit(exit_base, NULL);
+}
+
+static void
+http_request_expect_error(struct evhttp_request *req, void *arg)
+{
+ if (evhttp_request_get_response_code(req) == HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ EVUTIL_ASSERT(arg);
+ event_base_loopexit(arg, NULL);
+}
+
+/* test virtual hosts */
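+/*
+ * evhttp_add_virtual_host() attaches a secondary evhttp to a host name
+ * (wildcards such as "bar.*.foo.com" are allowed), and
+ * evhttp_add_server_alias() adds extra names for the same vhost.  The test
+ * below checks exact, wildcard and alias matches, a Host header that
+ * carries a port, and an absolute request-URI overriding the Host header.
+ */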
+static void
+http_virtual_host_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct evhttp *second = NULL, *third = NULL;
+ evutil_socket_t fd;
+ struct bufferevent *bev;
+ const char *http_request;
+
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, 0);
+
+ /* virtual host */
+ second = evhttp_new(NULL);
+ evhttp_set_cb(second, "/funnybunny", http_basic_cb, NULL);
+ third = evhttp_new(NULL);
+ evhttp_set_cb(third, "/blackcoffee", http_basic_cb, NULL);
+
+ if (evhttp_add_virtual_host(http, "foo.com", second) == -1) {
+ tt_abort_msg("Couldn't add vhost");
+ }
+
+ if (evhttp_add_virtual_host(http, "bar.*.foo.com", third) == -1) {
+ tt_abort_msg("Couldn't add wildcarded vhost");
+ }
+
+ /* add some aliases to the vhosts */
+ tt_assert(evhttp_add_server_alias(second, "manolito.info") == 0);
+ tt_assert(evhttp_add_server_alias(third, "bonkers.org") == 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /* make a request with a different host and expect an error */
+ req = evhttp_request_new(http_request_expect_error, data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/funnybunny") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 1);
+
+ test_ok = 0;
+
+ /* make a request with the right host and expect a response */
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "foo.com");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/funnybunny") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 1);
+
+ test_ok = 0;
+
+ /* make a request with the right host and expect a response */
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "bar.magic.foo.com");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/blackcoffee") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+	tt_assert(test_ok == 1);
+
+ test_ok = 0;
+
+ /* make a request with the right host and expect a response */
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "manolito.info");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/funnybunny") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+	tt_assert(test_ok == 1);
+
+ test_ok = 0;
+
+ /* make a request with the right host and expect a response */
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+
+ /* Add the Host header. This time with the optional port. */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "bonkers.org:8000");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/blackcoffee") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+	tt_assert(test_ok == 1);
+
+ test_ok = 0;
+
+ /* Now make a raw request with an absolute URI. */
+ fd = http_connect("127.0.0.1", port);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, NULL);
+
+ /* The host in the URI should override the Host: header */
+ http_request =
+ "GET http://manolito.info/funnybunny HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 2);
+
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+
+/* test date header and content length */
+
+static void
+http_request_empty_done(struct evhttp_request *req, void *arg)
+{
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Date") == NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Length") == NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (strcmp(evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Length"),
+ "0")) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 0) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ EVUTIL_ASSERT(arg);
+ event_base_loopexit(arg, NULL);
+}
+
+/*
+ * HTTP DISPATCHER test
+ */
+
+void
+http_dispatcher_cb(struct evhttp_request *req, void *arg)
+{
+
+ struct evbuffer *evb = evbuffer_new();
+ event_debug(("%s: called\n", __func__));
+ evbuffer_add_printf(evb, "DISPATCHER_TEST");
+
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+ evbuffer_free(evb);
+}
+
+static void
+http_dispatcher_test_done(struct evhttp_request *req, void *arg)
+{
+ struct event_base *base = arg;
+ const char *what = "DISPATCHER_TEST";
+
+ if (!req) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) {
+ fprintf(stderr, "FAILED (content type)\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) {
+ fprintf(stderr, "FAILED (length %lu vs %lu)\n",
+ (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what));
+ exit(1);
+ }
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) {
+ fprintf(stderr, "FAILED (data)\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_base_loopexit(base, NULL);
+}
+
+static void
+http_dispatcher_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /* also bind to local host */
+ evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+	/*
+	 * At this point, we want to schedule an HTTP GET request to the
+	 * server using our make request method.
+	 */
+
+ req = evhttp_request_new(http_dispatcher_test_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/?arg=val") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+/*
+ * HTTP POST test.
+ */
+
+void http_postrequest_done(struct evhttp_request *, void *);
+
+#define POST_DATA "Okay. Not really printf"
+
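+/*
+ * POST test: the body is placed in the request's output buffer before
+ * evhttp_make_request(); the server-side handler (http_post_cb, below)
+ * checks the method, verifies that the body matches POST_DATA and replies
+ * with BASIC_REQUEST_BODY.  The second round adds an
+ * "Expect: 100-continue" header to exercise the continue handling.
+ */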
+static void
+http_post_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request to the
+	 * server using our make request method.
+	 */
+
+ req = evhttp_request_new(http_postrequest_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+ evbuffer_add_printf(evhttp_request_get_output_buffer(req), POST_DATA);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 1);
+
+ test_ok = 0;
+
+ req = evhttp_request_new(http_postrequest_done, data->base);
+ tt_assert(req);
+
+ /* Now try with 100-continue. */
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Expect", "100-continue");
+ evbuffer_add_printf(evhttp_request_get_output_buffer(req), POST_DATA);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 1);
+
+ evhttp_connection_free(evcon);
+ evhttp_free(http);
+
+ end:
+ ;
+}
+
+void
+http_post_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb;
+ event_debug(("%s: called\n", __func__));
+
+ /* Yes, we are expecting a post request */
+ if (evhttp_request_get_command(req) != EVHTTP_REQ_POST) {
+ fprintf(stdout, "FAILED (post type)\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(POST_DATA)) {
+ fprintf(stdout, "FAILED (length: %lu vs %lu)\n",
+ (unsigned long) evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long) strlen(POST_DATA));
+ exit(1);
+ }
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), POST_DATA) != 0) {
+ fprintf(stdout, "FAILED (data)\n");
+ fprintf(stdout, "Got :%s\n", evbuffer_pullup(evhttp_request_get_input_buffer(req),-1));
+ fprintf(stdout, "Want:%s\n", POST_DATA);
+ exit(1);
+ }
+
+ evb = evbuffer_new();
+ evbuffer_add_printf(evb, BASIC_REQUEST_BODY);
+
+ evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+ evbuffer_free(evb);
+}
+
+void
+http_postrequest_done(struct evhttp_request *req, void *arg)
+{
+ const char *what = BASIC_REQUEST_BODY;
+ struct event_base *base = arg;
+
+ if (req == NULL) {
+ fprintf(stderr, "FAILED (timeout)\n");
+ exit(1);
+ }
+
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) {
+ fprintf(stderr, "FAILED (content type)\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) {
+ fprintf(stderr, "FAILED (length %lu vs %lu)\n",
+ (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what));
+ exit(1);
+ }
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) {
+ fprintf(stderr, "FAILED (data)\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_base_loopexit(base, NULL);
+}
+
+/*
+ * HTTP PUT test, basically just like POST, but ...
+ */
+
+void http_putrequest_done(struct evhttp_request *, void *);
+
+#define PUT_DATA "Hi, I'm some PUT data"
+
+static void
+http_put_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /*
+ * Schedule the HTTP PUT request
+ */
+
+ req = evhttp_request_new(http_putrequest_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "someotherhost");
+ evbuffer_add_printf(evhttp_request_get_output_buffer(req), PUT_DATA);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_PUT, "/putit") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ evhttp_connection_free(evcon);
+ evhttp_free(http);
+
+ tt_int_op(test_ok, ==, 1);
+ end:
+ ;
+}
+
+void
+http_put_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *evb;
+ event_debug(("%s: called\n", __func__));
+
+ /* Expecting a PUT request */
+ if (evhttp_request_get_command(req) != EVHTTP_REQ_PUT) {
+ fprintf(stdout, "FAILED (put type)\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(PUT_DATA)) {
+ fprintf(stdout, "FAILED (length: %lu vs %lu)\n",
+ (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(PUT_DATA));
+ exit(1);
+ }
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), PUT_DATA) != 0) {
+ fprintf(stdout, "FAILED (data)\n");
+ fprintf(stdout, "Got :%s\n", evbuffer_pullup(evhttp_request_get_input_buffer(req),-1));
+ fprintf(stdout, "Want:%s\n", PUT_DATA);
+ exit(1);
+ }
+
+ evb = evbuffer_new();
+ evbuffer_add_printf(evb, "That ain't funny");
+
+ evhttp_send_reply(req, HTTP_OK, "Everything is great", evb);
+
+ evbuffer_free(evb);
+}
+
+void
+http_putrequest_done(struct evhttp_request *req, void *arg)
+{
+ struct event_base *base = arg;
+ const char *what = "That ain't funny";
+
+ if (req == NULL) {
+ fprintf(stderr, "FAILED (timeout)\n");
+ exit(1);
+ }
+
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") == NULL) {
+ fprintf(stderr, "FAILED (content type)\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != strlen(what)) {
+ fprintf(stderr, "FAILED (length %lu vs %lu)\n",
+ (unsigned long)evbuffer_get_length(evhttp_request_get_input_buffer(req)), (unsigned long)strlen(what));
+ exit(1);
+ }
+
+
+ if (evbuffer_datacmp(evhttp_request_get_input_buffer(req), what) != 0) {
+ fprintf(stderr, "FAILED (data)\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_base_loopexit(base, NULL);
+}
+
+static void
+http_failure_readcb(struct bufferevent *bev, void *arg)
+{
+ const char *what = "400 Bad Request";
+ if (evbuffer_contains(bufferevent_get_input(bev), what)) {
+ test_ok = 2;
+ bufferevent_disable(bev, EV_READ);
+ event_base_loopexit(arg, NULL);
+ }
+}
+
+/*
+ * Testing that the HTTP server can deal with a malformed request.
+ */
+static void
+http_failure_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev;
+ evutil_socket_t fd = -1;
+ const char *http_request;
+ ev_uint16_t port = 0;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, http_failure_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_request = "illegal request\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev);
+
+ evhttp_free(http);
+
+ tt_int_op(test_ok, ==, 2);
+ end:
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
+static void
+close_detect_done(struct evhttp_request *req, void *arg)
+{
+ struct timeval tv;
+ tt_assert(req);
+ tt_assert(evhttp_request_get_response_code(req) == HTTP_OK);
+
+ test_ok = 1;
+
+ end:
+ evutil_timerclear(&tv);
+ tv.tv_usec = 150000;
+ event_base_loopexit(arg, &tv);
+}
+
+static void
+close_detect_launch(evutil_socket_t fd, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct event_base *base = evhttp_connection_get_base(evcon);
+ struct evhttp_request *req;
+
+ req = evhttp_request_new(close_detect_done, base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_fail_msg("Couldn't make request");
+ }
+}
+
+static void
+close_detect_cb(struct evhttp_request *req, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct event_base *base = evhttp_connection_get_base(evcon);
+ struct timeval tv;
+
+ if (req != NULL && evhttp_request_get_response_code(req) != HTTP_OK) {
+ tt_abort_msg("Failed");
+ }
+
+ evutil_timerclear(&tv);
+	tv.tv_sec = 0;
+	tv.tv_usec = 600000; /* longer than the .1-second http timeout */
+
+	/* launch a new request on the persistent connection in .6 seconds */
+ event_base_once(base, -1, EV_TIMEOUT, close_detect_launch, evcon, &tv);
+ end:
+ ;
+}
+
+
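+/*
+ * Close-detection tests: the server gets a 0.1-second timeout, while the
+ * client waits 0.6 seconds (close_detect_cb) before launching a second
+ * request, so the server should have dropped the idle connection by then.
+ * The second request should still complete (close_detect_done checks for
+ * HTTP_OK), and at the end the server must have no tracked connections.
+ * The "_delay" variant requests /largedelay, where http_large_delay_cb
+ * also forces an EOF on the client connection.
+ */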
+static void
+http_close_detection_(struct basic_test_data *data, int with_delay)
+{
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ const struct timeval sec_tenth = { 0, 100000 };
+
+ test_ok = 0;
+ http = http_setup(&port, data->base, 0);
+
+ /* .1 second timeout */
+ evhttp_set_timeout_tv(http, &sec_tenth);
+
+ evcon = evhttp_connection_base_new(data->base, NULL,
+ "127.0.0.1", port);
+	tt_assert(evcon);
+	evhttp_connection_set_timeout_tv(evcon, &sec_tenth);
+
+ delayed_client = evcon;
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(close_detect_cb, evcon);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon,
+ req, EVHTTP_REQ_GET, with_delay ? "/largedelay" : "/test") == -1) {
+ tt_abort_msg("couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ /* at this point, the http server should have no connection */
+ tt_assert(TAILQ_FIRST(&http->connections) == NULL);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+static void
+http_close_detection_test(void *arg)
+{
+ http_close_detection_(arg, 0);
+}
+static void
+http_close_detection_delay_test(void *arg)
+{
+ http_close_detection_(arg, 1);
+}
+
+static void
+http_highport_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ int i = -1;
+ struct evhttp *myhttp = NULL;
+
+ /* Try a few different ports */
+ for (i = 0; i < 50; ++i) {
+ myhttp = evhttp_new(data->base);
+ if (evhttp_bind_socket(myhttp, "127.0.0.1", 65535 - i) == 0) {
+ test_ok = 1;
+ evhttp_free(myhttp);
+ return;
+ }
+ evhttp_free(myhttp);
+ }
+
+ tt_fail_msg("Couldn't get a high port");
+}
+
+static void
+http_bad_header_test(void *ptr)
+{
+ struct evkeyvalq headers;
+
+ TAILQ_INIT(&headers);
+
+ tt_want(evhttp_add_header(&headers, "One", "Two") == 0);
+ tt_want(evhttp_add_header(&headers, "One", "Two\r\n Three") == 0);
+ tt_want(evhttp_add_header(&headers, "One\r", "Two") == -1);
+ tt_want(evhttp_add_header(&headers, "One\n", "Two") == -1);
+ tt_want(evhttp_add_header(&headers, "One", "Two\r") == -1);
+ tt_want(evhttp_add_header(&headers, "One", "Two\n") == -1);
+
+ evhttp_clear_headers(&headers);
+}
+
+static int validate_header(
+ const struct evkeyvalq* headers,
+ const char *key, const char *value)
+{
+ const char *real_val = evhttp_find_header(headers, key);
+ tt_assert(real_val != NULL);
+ tt_want(strcmp(real_val, value) == 0);
+end:
+ return (0);
+}
+
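+/*
+ * evhttp_parse_query() splits the query string of a URL into an
+ * evkeyvalq, decoding '+' and %XX escapes; it returns 0 on success and -1
+ * for malformed input such as a bare key with no '=' or an empty "&&"
+ * pair, which is what the negative cases below check.
+ */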
+static void
+http_parse_query_test(void *ptr)
+{
+ struct evkeyvalq headers;
+ int r;
+
+ TAILQ_INIT(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test", &headers);
+ tt_want(validate_header(&headers, "q", "test") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test&foo=bar", &headers);
+ tt_want(validate_header(&headers, "q", "test") == 0);
+ tt_want(validate_header(&headers, "foo", "bar") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test+foo", &headers);
+ tt_want(validate_header(&headers, "q", "test foo") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test%0Afoo", &headers);
+ tt_want(validate_header(&headers, "q", "test\nfoo") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test%0Dfoo", &headers);
+ tt_want(validate_header(&headers, "q", "test\rfoo") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test&&q2", &headers);
+ tt_int_op(r, ==, -1);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test+this", &headers);
+ tt_want(validate_header(&headers, "q", "test this") == 0);
+ tt_int_op(r, ==, 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=test&q2=foo", &headers);
+ tt_int_op(r, ==, 0);
+ tt_want(validate_header(&headers, "q", "test") == 0);
+ tt_want(validate_header(&headers, "q2", "foo") == 0);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q&q2=foo", &headers);
+ tt_int_op(r, ==, -1);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=foo&q2", &headers);
+ tt_int_op(r, ==, -1);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=foo&q2&q3=x", &headers);
+ tt_int_op(r, ==, -1);
+ evhttp_clear_headers(&headers);
+
+ r = evhttp_parse_query("http://www.test.com/?q=&q2=&q3=", &headers);
+ tt_int_op(r, ==, 0);
+ tt_want(validate_header(&headers, "q", "") == 0);
+ tt_want(validate_header(&headers, "q2", "") == 0);
+ tt_want(validate_header(&headers, "q3", "") == 0);
+ evhttp_clear_headers(&headers);
+
+end:
+ evhttp_clear_headers(&headers);
+}
+
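+/*
+ * URI parsing test.  The behaviour depends on `ptr': with a non-NULL
+ * argument the URIs are parsed with EVHTTP_URI_NONCONFORMANT, and the
+ * NCF() cases must then be accepted instead of rejected.  BAD() inputs
+ * must fail in both modes, and TT_URI() checks that evhttp_uri_join()
+ * reproduces the expected canonical form.
+ */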
+static void
+http_parse_uri_test(void *ptr)
+{
+ const int nonconform = (ptr != NULL);
+ const unsigned parse_flags =
+ nonconform ? EVHTTP_URI_NONCONFORMANT : 0;
+ struct evhttp_uri *uri = NULL;
+ char url_tmp[4096];
+#define URI_PARSE(uri) \
+ evhttp_uri_parse_with_flags((uri), parse_flags)
+
+#define TT_URI(want) do { \
+ char *ret = evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)); \
+ tt_want(ret != NULL); \
+ tt_want(ret == url_tmp); \
+ if (strcmp(ret,want) != 0) \
+ TT_FAIL(("\"%s\" != \"%s\"",ret,want)); \
+ } while(0)
+
+ tt_want(evhttp_uri_join(NULL, 0, 0) == NULL);
+ tt_want(evhttp_uri_join(NULL, url_tmp, 0) == NULL);
+ tt_want(evhttp_uri_join(NULL, url_tmp, sizeof(url_tmp)) == NULL);
+
+ /* bad URIs: parsing */
+#define BAD(s) do { \
+ if (URI_PARSE(s) != NULL) \
+ TT_FAIL(("Expected error parsing \"%s\"",s)); \
+ } while(0)
+ /* Nonconformant URIs we can parse: parsing */
+#define NCF(s) do { \
+ uri = URI_PARSE(s); \
+ if (uri != NULL && !nonconform) { \
+ TT_FAIL(("Expected error parsing \"%s\"",s)); \
+ } else if (uri == NULL && nonconform) { \
+ TT_FAIL(("Couldn't parse nonconformant URI \"%s\"", \
+ s)); \
+ } \
+ if (uri) { \
+ tt_want(evhttp_uri_join(uri, url_tmp, \
+ sizeof(url_tmp))); \
+ evhttp_uri_free(uri); \
+ } \
+ } while(0)
+
+ NCF("http://www.test.com/ why hello");
+ NCF("http://www.test.com/why-hello\x01");
+ NCF("http://www.test.com/why-hello?\x01");
+ NCF("http://www.test.com/why-hello#\x01");
+ BAD("http://www.\x01.test.com/why-hello");
+ BAD("http://www.%7test.com/why-hello");
+ NCF("http://www.test.com/why-hell%7o");
+ BAD("h%3ttp://www.test.com/why-hello");
+ NCF("http://www.test.com/why-hello%7");
+ NCF("http://www.test.com/why-hell%7o");
+ NCF("http://www.test.com/foo?ba%r");
+ NCF("http://www.test.com/foo#ba%r");
+ BAD("99:99/foo");
+ BAD("http://www.test.com:999x/");
+ BAD("http://www.test.com:x/");
+ BAD("http://[hello-there]/");
+ BAD("http://[::1]]/");
+ BAD("http://[::1/");
+ BAD("http://[foob/");
+ BAD("http://[/");
+ BAD("http://[ffff:ffff:ffff:ffff:Ffff:ffff:ffff:"
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff]/");
+ BAD("http://[vX.foo]/");
+ BAD("http://[vX.foo]/");
+ BAD("http://[v.foo]/");
+ BAD("http://[v5.fo%o]/");
+ BAD("http://[v5X]/");
+ BAD("http://[v5]/");
+ BAD("http://[]/");
+ BAD("http://f\x01red@www.example.com/");
+ BAD("http://f%0red@www.example.com/");
+ BAD("http://www.example.com:9999999999999999999999999999999999999/");
+ BAD("http://www.example.com:hihi/");
+ BAD("://www.example.com/");
+
+ /* bad URIs: joining */
+ uri = evhttp_uri_new();
+ tt_want(0==evhttp_uri_set_host(uri, "www.example.com"));
+ tt_want(evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)) != NULL);
+ /* not enough space: */
+ tt_want(evhttp_uri_join(uri, url_tmp, 3) == NULL);
+ /* host is set, but path doesn't start with "/": */
+ tt_want(0==evhttp_uri_set_path(uri, "hi_mom"));
+ tt_want(evhttp_uri_join(uri, url_tmp, sizeof(url_tmp)) == NULL);
+ tt_want(evhttp_uri_join(uri, NULL, sizeof(url_tmp))==NULL);
+ tt_want(evhttp_uri_join(uri, url_tmp, 0)==NULL);
+ evhttp_uri_free(uri);
+ uri = URI_PARSE("mailto:foo@bar");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_host(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(!strcmp(evhttp_uri_get_scheme(uri), "mailto"));
+ tt_want(!strcmp(evhttp_uri_get_path(uri), "foo@bar"));
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("mailto:foo@bar");
+ evhttp_uri_free(uri);
+
+ uri = evhttp_uri_new();
+ /* Bad URI usage: setting invalid values */
+ tt_want(-1 == evhttp_uri_set_scheme(uri,""));
+ tt_want(-1 == evhttp_uri_set_scheme(uri,"33"));
+ tt_want(-1 == evhttp_uri_set_scheme(uri,"hi!"));
+ tt_want(-1 == evhttp_uri_set_userinfo(uri,"hello@"));
+ tt_want(-1 == evhttp_uri_set_host(uri,"[1.2.3.4]"));
+ tt_want(-1 == evhttp_uri_set_host(uri,"["));
+ tt_want(-1 == evhttp_uri_set_host(uri,"www.[foo].com"));
+ tt_want(-1 == evhttp_uri_set_port(uri,-3));
+ tt_want(-1 == evhttp_uri_set_path(uri,"hello?world"));
+ tt_want(-1 == evhttp_uri_set_query(uri,"hello#world"));
+ tt_want(-1 == evhttp_uri_set_fragment(uri,"hello#world"));
+ /* Valid URI usage: setting valid values */
+ tt_want(0 == evhttp_uri_set_scheme(uri,"http"));
+ tt_want(0 == evhttp_uri_set_scheme(uri,NULL));
+ tt_want(0 == evhttp_uri_set_userinfo(uri,"username:pass"));
+ tt_want(0 == evhttp_uri_set_userinfo(uri,NULL));
+ tt_want(0 == evhttp_uri_set_host(uri,"www.example.com"));
+ tt_want(0 == evhttp_uri_set_host(uri,"1.2.3.4"));
+ tt_want(0 == evhttp_uri_set_host(uri,"[1:2:3:4::]"));
+ tt_want(0 == evhttp_uri_set_host(uri,"[v7.wobblewobble]"));
+ tt_want(0 == evhttp_uri_set_host(uri,NULL));
+ tt_want(0 == evhttp_uri_set_host(uri,""));
+ tt_want(0 == evhttp_uri_set_port(uri, -1));
+ tt_want(0 == evhttp_uri_set_port(uri, 80));
+ tt_want(0 == evhttp_uri_set_port(uri, 65535));
+ tt_want(0 == evhttp_uri_set_path(uri, ""));
+ tt_want(0 == evhttp_uri_set_path(uri, "/documents/public/index.html"));
+ tt_want(0 == evhttp_uri_set_path(uri, NULL));
+ tt_want(0 == evhttp_uri_set_query(uri, "key=val&key2=val2"));
+ tt_want(0 == evhttp_uri_set_query(uri, "keyvalblarg"));
+ tt_want(0 == evhttp_uri_set_query(uri, ""));
+ tt_want(0 == evhttp_uri_set_query(uri, NULL));
+ tt_want(0 == evhttp_uri_set_fragment(uri, ""));
+ tt_want(0 == evhttp_uri_set_fragment(uri, "here?i?am"));
+ tt_want(0 == evhttp_uri_set_fragment(uri, NULL));
+ evhttp_uri_free(uri);
+
+ /* Valid parsing */
+ uri = URI_PARSE("http://www.test.com/?q=t%33est");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=t%33est") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://www.test.com/?q=t%33est");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://%77ww.test.com");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "%77ww.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://%77ww.test.com");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://www.test.com?q=test");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://www.test.com?q=test");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://www.test.com#fragment");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want_str_op(evhttp_uri_get_fragment(uri), ==, "fragment");
+ TT_URI("http://www.test.com#fragment");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://8000/");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "8000") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://8000/");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://:8000/");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == 8000);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://:8000/");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://www.test.com:/"); /* empty port */
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want_str_op(evhttp_uri_get_path(uri), ==, "/");
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://www.test.com/");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("http://www.test.com:"); /* empty port 2 */
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "http") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("http://www.test.com");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("ftp://www.test.com/?q=test");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "www.test.com") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("ftp://www.test.com/?q=test");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("ftp://[::1]:999/?q=test");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "[::1]") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == 999);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("ftp://[::1]:999/?q=test");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("ftp://[ff00::127.0.0.1]/?q=test");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "[ff00::127.0.0.1]") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("ftp://[ff00::127.0.0.1]/?q=test");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("ftp://[v99.not_(any:time)_soon]/?q=test");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "ftp") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "[v99.not_(any:time)_soon]") == 0);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("ftp://[v99.not_(any:time)_soon]/?q=test");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("scheme://user:pass@foo.com:42/?q=test&s=some+thing#fragment");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+ tt_want(strcmp(evhttp_uri_get_userinfo(uri), "user:pass") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+ tt_want(evhttp_uri_get_port(uri) == 42);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=test&s=some+thing") == 0);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "fragment") == 0);
+ TT_URI("scheme://user:pass@foo.com:42/?q=test&s=some+thing#fragment");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("scheme://user@foo.com/#fragment");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+ tt_want(strcmp(evhttp_uri_get_userinfo(uri), "user") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "fragment") == 0);
+ TT_URI("scheme://user@foo.com/#fragment");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("scheme://%75ser@foo.com/#frag@ment");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "scheme") == 0);
+ tt_want(strcmp(evhttp_uri_get_userinfo(uri), "%75ser") == 0);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "foo.com") == 0);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "frag@ment") == 0);
+ TT_URI("scheme://%75ser@foo.com/#frag@ment");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("file:///some/path/to/the/file");
+ tt_want(strcmp(evhttp_uri_get_scheme(uri), "file") == 0);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/some/path/to/the/file") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("file:///some/path/to/the/file");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("///some/path/to/the-file");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_scheme(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(strcmp(evhttp_uri_get_host(uri), "") == 0);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/some/path/to/the-file") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("///some/path/to/the-file");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("/s:ome/path/to/the-file?q=99#fred");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_scheme(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_host(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "/s:ome/path/to/the-file") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=99") == 0);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "fred") == 0);
+ TT_URI("/s:ome/path/to/the-file?q=99#fred");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("relative/path/with/co:lon");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_scheme(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_host(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "relative/path/with/co:lon") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(evhttp_uri_get_fragment(uri) == NULL);
+ TT_URI("relative/path/with/co:lon");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("bob?q=99&q2=q?33#fr?ed");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_scheme(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_host(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "bob") == 0);
+ tt_want(strcmp(evhttp_uri_get_query(uri), "q=99&q2=q?33") == 0);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "fr?ed") == 0);
+ TT_URI("bob?q=99&q2=q?33#fr?ed");
+ evhttp_uri_free(uri);
+
+ uri = URI_PARSE("#fr?ed");
+ tt_want(uri != NULL);
+ tt_want(evhttp_uri_get_scheme(uri) == NULL);
+ tt_want(evhttp_uri_get_userinfo(uri) == NULL);
+ tt_want(evhttp_uri_get_host(uri) == NULL);
+ tt_want(evhttp_uri_get_port(uri) == -1);
+ tt_want(strcmp(evhttp_uri_get_path(uri), "") == 0);
+ tt_want(evhttp_uri_get_query(uri) == NULL);
+ tt_want(strcmp(evhttp_uri_get_fragment(uri), "fr?ed") == 0);
+ TT_URI("#fr?ed");
+ evhttp_uri_free(uri);
+#undef URI_PARSE
+#undef TT_URI
+#undef BAD
+}
+
+static void
+http_uriencode_test(void *ptr)
+{
+ char *s=NULL, *s2=NULL;
+ size_t sz;
+ int bytes_decoded;
+
+#define ENC(from,want,plus) do { \
+ s = evhttp_uriencode((from), -1, (plus)); \
+ tt_assert(s); \
+ tt_str_op(s,==,(want)); \
+ sz = -1; \
+ s2 = evhttp_uridecode((s), (plus), &sz); \
+ tt_assert(s2); \
+ tt_str_op(s2,==,(from)); \
+ tt_int_op(sz,==,strlen(from)); \
+ free(s); \
+ free(s2); \
+ s = s2 = NULL; \
+ } while (0)
+
+#define DEC(from,want,dp) do { \
+ s = evhttp_uridecode((from),(dp),&sz); \
+ tt_assert(s); \
+ tt_str_op(s,==,(want)); \
+ tt_int_op(sz,==,strlen(want)); \
+ free(s); \
+ s = NULL; \
+ } while (0)
+
+#define OLD_DEC(from,want) do { \
+ s = evhttp_decode_uri((from)); \
+ tt_assert(s); \
+ tt_str_op(s,==,(want)); \
+ free(s); \
+ s = NULL; \
+ } while (0)
+
+
+ ENC("Hello", "Hello",0);
+ ENC("99", "99",0);
+ ENC("", "",0);
+ ENC(
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789-.~_",
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789-.~_",0);
+ ENC(" ", "%20",0);
+ ENC(" ", "+",1);
+ ENC("\xff\xf0\xe0", "%FF%F0%E0",0);
+ ENC("\x01\x19", "%01%19",1);
+ ENC("http://www.ietf.org/rfc/rfc3986.txt",
+ "http%3A%2F%2Fwww.ietf.org%2Frfc%2Frfc3986.txt",1);
+
+ ENC("1+2=3", "1%2B2%3D3",1);
+ ENC("1+2=3", "1%2B2%3D3",0);
+
+ /* Now try encoding with internal NULs. */
+ s = evhttp_uriencode("hello\0world", 11, 0);
+ tt_assert(s);
+ tt_str_op(s,==,"hello%00world");
+ free(s);
+ s = NULL;
+
+	/* Now try decoding just part of a string. */
+ s = malloc(6 + 1 /* NUL byte */);
+ bytes_decoded = evhttp_decode_uri_internal("hello%20%20", 6, s, 0);
+ tt_assert(s);
+ tt_int_op(bytes_decoded,==,6);
+ tt_str_op(s,==,"hello%");
+ free(s);
+ s = NULL;
+
+ /* Now try out some decoding cases that we don't generate with
+ * encode_uri: Make sure that malformed stuff doesn't crash... */
+ DEC("%%xhello th+ere \xff",
+ "%%xhello th+ere \xff", 0);
+ /* Make sure plus decoding works */
+ DEC("plus+should%20work+", "plus should work ",1);
+ /* Try some lowercase hex */
+ DEC("%f0%a0%b0", "\xf0\xa0\xb0",1);
+
+ /* Try an internal NUL. */
+ sz = 0;
+ s = evhttp_uridecode("%00%00x%00%00", 1, &sz);
+ tt_int_op(sz,==,5);
+ tt_assert(!memcmp(s, "\0\0x\0\0", 5));
+ free(s);
+ s = NULL;
+
+ /* Try with size == NULL */
+ sz = 0;
+ s = evhttp_uridecode("%00%00x%00%00", 1, NULL);
+ tt_assert(!memcmp(s, "\0\0x\0\0", 5));
+ free(s);
+ s = NULL;
+
+ /* Test out the crazy old behavior of the deprecated
+ * evhttp_decode_uri */
+ OLD_DEC("http://example.com/normal+path/?key=val+with+spaces",
+ "http://example.com/normal+path/?key=val with spaces");
+
+end:
+ if (s)
+ free(s);
+ if (s2)
+ free(s2);
+#undef ENC
+#undef DEC
+#undef OLD_DEC
+}
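+
+/*
+ * A minimal standalone sketch of the public encode/decode API exercised
+ * above (illustrative only; error handling omitted):
+ *
+ *	size_t n;
+ *	char *enc = evhttp_uriencode("a b&c", -1, 1);	// "a+b%26c"
+ *	char *dec = evhttp_uridecode(enc, 1, &n);	// "a b&c", n == 5
+ *	free(enc);
+ *	free(dec);
+ */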
+
+static void
+http_base_test(void *ptr)
+{
+ struct event_base *base = NULL;
+ struct bufferevent *bev;
+ evutil_socket_t fd;
+ const char *http_request;
+ ev_uint16_t port = 0;
+
+ test_ok = 0;
+ base = event_base_new();
+ tt_assert(base);
+ http = http_setup(&port, base, 0);
+
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+	/* Quick-and-dirty way to send a request */
+ bev = bufferevent_socket_new(base, fd, 0);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, base);
+ bufferevent_base_set(base, bev);
+
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(base);
+
+ bufferevent_free(bev);
+ evutil_closesocket(fd);
+
+ evhttp_free(http);
+
+ tt_int_op(test_ok, ==, 2);
+
+end:
+ if (base)
+ event_base_free(base);
+}
+
+/*
+ * the server is just going to close the connection if it times out during
+ * reading the headers.
+ */
+
+static void
+http_incomplete_readcb(struct bufferevent *bev, void *arg)
+{
+ test_ok = -1;
+ event_base_loopexit(exit_base,NULL);
+}
+
+static void
+http_incomplete_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ /** For ssl */
+ if (what & BEV_EVENT_CONNECTED)
+ return;
+
+ if (what == (BEV_EVENT_READING|BEV_EVENT_EOF))
+ test_ok++;
+ else
+ test_ok = -2;
+ event_base_loopexit(exit_base,NULL);
+}
+
+static void
+http_incomplete_writecb(struct bufferevent *bev, void *arg)
+{
+ if (arg != NULL) {
+ evutil_socket_t fd = *(evutil_socket_t *)arg;
+ /* terminate the write side to simulate EOF */
+ shutdown(fd, SHUT_WR);
+ }
+ if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+ /* enable reading of the reply */
+ bufferevent_enable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+static void
+http_incomplete_test_(struct basic_test_data *data, int use_timeout, int ssl)
+{
+ struct bufferevent *bev;
+ evutil_socket_t fd;
+ const char *http_request;
+ ev_uint16_t port = 0;
+ struct timeval tv_start, tv_end;
+
+ exit_base = data->base;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+ evhttp_set_timeout(http, 1);
+
+ fd = http_connect("127.0.0.1", port);
+ tt_int_op(fd, >=, 0);
+
+	/* Quick-and-dirty way to send a request */
+ bev = create_bev(data->base, fd, ssl);
+ bufferevent_setcb(bev,
+ http_incomplete_readcb, http_incomplete_writecb,
+ http_incomplete_errorcb, use_timeout ? NULL : &fd);
+
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ evutil_gettimeofday(&tv_start, NULL);
+
+ event_base_dispatch(data->base);
+
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+ bufferevent_free(bev);
+ if (use_timeout) {
+ evutil_closesocket(fd);
+ fd = -1;
+ }
+
+ evhttp_free(http);
+
+ if (use_timeout && tv_end.tv_sec >= 3) {
+ tt_abort_msg("time");
+ } else if (!use_timeout && tv_end.tv_sec >= 1) {
+ /* we should be done immediately */
+ tt_abort_msg("time");
+ }
+
+ tt_int_op(test_ok, ==, 2);
+ end:
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+static void http_incomplete_test(void *arg)
+{ http_incomplete_test_(arg, 0, 0); }
+static void http_incomplete_timeout_test(void *arg)
+{ http_incomplete_test_(arg, 1, 0); }
+
+
+/*
+ * the server is going to reply with chunked data.
+ */
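+
+/*
+ * For reference, a server-side handler produces such a reply with the
+ * chunked-reply API; a minimal sketch (the handler name is illustrative,
+ * not the actual callback registered by this test suite):
+ *
+ *	static void chunked_cb_sketch(struct evhttp_request *req, void *arg)
+ *	{
+ *		struct evbuffer *evb = evbuffer_new();
+ *		evhttp_send_reply_start(req, HTTP_OK, "OK");
+ *		evbuffer_add_printf(evb, "This is funny");
+ *		evhttp_send_reply_chunk(req, evb);
+ *		evhttp_send_reply_end(req);
+ *		evbuffer_free(evb);
+ *	}
+ */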
+
+static void
+http_chunked_readcb(struct bufferevent *bev, void *arg)
+{
+ /* nothing here */
+}
+
+static void
+http_chunked_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ struct evhttp_request *req = NULL;
+
+ /** SSL */
+ if (what & BEV_EVENT_CONNECTED)
+ return;
+
+ if (!test_ok)
+ goto out;
+
+ test_ok = -1;
+
+ if ((what & BEV_EVENT_EOF) != 0) {
+ const char *header;
+ enum message_read_status done;
+ req = evhttp_request_new(NULL, NULL);
+
+ /* req->kind = EVHTTP_RESPONSE; */
+ done = evhttp_parse_firstline_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ done = evhttp_parse_headers_(req, bufferevent_get_input(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ header = evhttp_find_header(evhttp_request_get_input_headers(req), "Transfer-Encoding");
+ if (header == NULL || strcmp(header, "chunked"))
+ goto out;
+
+ header = evhttp_find_header(evhttp_request_get_input_headers(req), "Connection");
+ if (header == NULL || strcmp(header, "close"))
+ goto out;
+
+ header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+ if (header == NULL)
+ goto out;
+		/* chunk size "d" == 13 chars */
+ if (strcmp(header, "d")) {
+ free((void*)header);
+ goto out;
+ }
+ free((void*)header);
+
+ if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 13),
+ "This is funny", 13))
+ goto out;
+
+ evbuffer_drain(bufferevent_get_input(bev), 13 + 2);
+
+ header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+ if (header == NULL)
+ goto out;
+		/* chunk size "12" == 18 chars */
+		if (strcmp(header, "12")) {
+			free((void*)header);
+			goto out;
+		}
+		free((char *)header);
+
+ if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 18),
+ "but not hilarious.", 18))
+ goto out;
+
+ evbuffer_drain(bufferevent_get_input(bev), 18 + 2);
+
+ header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+ if (header == NULL)
+ goto out;
+ /* 8 chars */
+ if (strcmp(header, "8")) {
+ free((void*)header);
+ goto out;
+ }
+ free((char *)header);
+
+ if (strncmp((char *)evbuffer_pullup(bufferevent_get_input(bev), 8),
+ "bwv 1052.", 8))
+ goto out;
+
+ evbuffer_drain(bufferevent_get_input(bev), 8 + 2);
+
+ header = evbuffer_readln(bufferevent_get_input(bev), NULL, EVBUFFER_EOL_CRLF);
+ if (header == NULL)
+ goto out;
+ /* 0 chars */
+ if (strcmp(header, "0")) {
+ free((void*)header);
+ goto out;
+ }
+ free((void *)header);
+
+ test_ok = 2;
+ }
+
+out:
+ if (req)
+ evhttp_request_free(req);
+
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_chunked_writecb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+ /* enable reading of the reply */
+ bufferevent_enable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+static void
+http_chunked_request_done(struct evhttp_request *req, void *arg)
+{
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(evhttp_request_get_input_headers(req),
+ "Transfer-Encoding") == NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 13 + 18 + 8) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (strncmp((char *)evbuffer_pullup(evhttp_request_get_input_buffer(req), 13 + 18 + 8),
+ "This is funnybut not hilarious.bwv 1052",
+ 13 + 18 + 8)) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_chunk_out_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev;
+ evutil_socket_t fd;
+ const char *http_request;
+ ev_uint16_t port = 0;
+ struct timeval tv_start, tv_end;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ int i;
+
+ exit_base = data->base;
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+ fd = http_connect("127.0.0.1", port);
+
+	/* Quick-and-dirty way to send a request */
+ bev = create_bev(data->base, fd, ssl);
+ bufferevent_setcb(bev,
+ http_chunked_readcb, http_chunked_writecb,
+ http_chunked_errorcb, data->base);
+
+ http_request =
+ "GET /chunked HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ evutil_gettimeofday(&tv_start, NULL);
+
+ event_base_dispatch(data->base);
+
+ bufferevent_free(bev);
+
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+ tt_int_op(tv_end.tv_sec, <, 1);
+
+ tt_int_op(test_ok, ==, 2);
+
+ /* now try again with the regular connection object */
+ bev = create_bev(data->base, -1, ssl);
+ evcon = evhttp_connection_base_bufferevent_new(
+ data->base, NULL, bev, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /* make two requests to check the keepalive behavior */
+ for (i = 0; i < 2; i++) {
+ test_ok = 0;
+ req = evhttp_request_new(http_chunked_request_done,data->base);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_GET, "/chunked") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_ok == 1);
+ }
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+static void http_chunk_out_test(void *arg)
+{ return http_chunk_out_test_impl(arg, 0); }
+
+static void
+http_stream_out_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct bufferevent *bev;
+
+ test_ok = 0;
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+ bev = create_bev(data->base, -1, ssl);
+ evcon = evhttp_connection_base_bufferevent_new(
+ data->base, NULL, bev, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_done,
+ (void *)"This is funnybut not hilarious.bwv 1052");
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/streamed")
+ == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+static void http_stream_out_test(void *arg)
+{ return http_stream_out_test_impl(arg, 0); }
+
+static void
+http_stream_in_chunk(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *reply = arg;
+
+ if (evhttp_request_get_response_code(req) != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ evbuffer_add_buffer(reply, evhttp_request_get_input_buffer(req));
+}
+
+static void
+http_stream_in_done(struct evhttp_request *req, void *arg)
+{
+ if (evbuffer_get_length(evhttp_request_get_input_buffer(req)) != 0) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ event_base_loopexit(exit_base, NULL);
+}
+
+/**
+ * Makes a request and reads the response in chunks.
+ */
+static void
+http_stream_in_test_(struct basic_test_data *data, char const *url,
+ size_t expected_len, char const *expected)
+{
+ struct evhttp_connection *evcon;
+ struct evbuffer *reply = evbuffer_new();
+ struct evhttp_request *req = NULL;
+ ev_uint16_t port = 0;
+
+ exit_base = data->base;
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL,"127.0.0.1", port);
+ tt_assert(evcon);
+
+ req = evhttp_request_new(http_stream_in_done, reply);
+ evhttp_request_set_chunked_cb(req, http_stream_in_chunk);
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, url) == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ if (evbuffer_get_length(reply) != expected_len) {
+ TT_DIE(("reply length %lu; expected %lu; FAILED (%s)\n",
+ (unsigned long)evbuffer_get_length(reply),
+ (unsigned long)expected_len,
+ (char*)evbuffer_pullup(reply, -1)));
+ }
+
+ if (memcmp(evbuffer_pullup(reply, -1), expected, expected_len) != 0) {
+ tt_abort_msg("Memory mismatch");
+ }
+
+ test_ok = 1;
+ end:
+ if (reply)
+ evbuffer_free(reply);
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_stream_in_test(void *arg)
+{
+ http_stream_in_test_(arg, "/chunked", 13 + 18 + 8,
+ "This is funnybut not hilarious.bwv 1052");
+
+ http_stream_in_test_(arg, "/test", strlen(BASIC_REQUEST_BODY),
+ BASIC_REQUEST_BODY);
+}
+
+static void
+http_stream_in_cancel_chunk(struct evhttp_request *req, void *arg)
+{
+ tt_int_op(evhttp_request_get_response_code(req), ==, HTTP_OK);
+
+ end:
+ evhttp_cancel_request(req);
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_stream_in_cancel_done(struct evhttp_request *req, void *arg)
+{
+ /* should never be called */
+ tt_fail_msg("In cancel done");
+}
+
+static void
+http_stream_in_cancel_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct evhttp_connection *evcon;
+ struct evhttp_request *req = NULL;
+ ev_uint16_t port = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ req = evhttp_request_new(http_stream_in_cancel_done, data->base);
+ evhttp_request_set_chunked_cb(req, http_stream_in_cancel_chunk);
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/chunked") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ test_ok = 1;
+ end:
+ evhttp_connection_free(evcon);
+ evhttp_free(http);
+
+}
+
+static void
+http_connection_fail_done(struct evhttp_request *req, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct event_base *base = evhttp_connection_get_base(evcon);
+
+ /* An ENETUNREACH error results in an unrecoverable
+ * evhttp_connection error (see evhttp_connection_fail_()). The
+ * connection will be reset, and the user will be notified with a NULL
+ * req parameter. */
+ tt_assert(!req);
+
+ evhttp_connection_free(evcon);
+
+ test_ok = 1;
+
+ end:
+ event_base_loopexit(base, NULL);
+}
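+
+/*
+ * For application code, the req == NULL convention above means a request
+ * callback usually distinguishes connection-level failures from HTTP
+ * errors; a minimal sketch (callback name is illustrative):
+ *
+ *	static void my_request_done(struct evhttp_request *req, void *arg)
+ *	{
+ *		if (!req) {
+ *			// connection failed; no response was parsed
+ *			return;
+ *		}
+ *		if (evhttp_request_get_response_code(req) != HTTP_OK)
+ *			return;
+ *		// ... read evhttp_request_get_input_buffer(req) ...
+ *	}
+ */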
+
+/* Test unrecoverable evhttp_connection errors by generating an ENETUNREACH
+ * error on connection. */
+static void
+http_connection_fail_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct bufferevent *bev;
+
+ exit_base = data->base;
+ test_ok = 0;
+
+ /* auto detect a port */
+ http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+ evhttp_free(http);
+ http = NULL;
+
+ bev = create_bev(data->base, -1, ssl);
+ /* Pick an unroutable address. This administratively scoped multicast
+ * address should do when working with TCP. */
+ evcon = evhttp_connection_base_bufferevent_new(
+ data->base, NULL, bev, "239.10.20.30", 80);
+ tt_assert(evcon);
+
+ /*
+	 * At this point, we want to schedule an HTTP GET request to the
+	 * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_connection_fail_done, evcon);
+ tt_assert(req);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(test_ok, ==, 1);
+
+ end:
+ ;
+}
+static void http_connection_fail_test(void *arg)
+{ return http_connection_fail_test_impl(arg, 0); }
+
+static void
+http_connection_retry_done(struct evhttp_request *req, void *arg)
+{
+ tt_assert(req);
+ tt_int_op(evhttp_request_get_response_code(req), !=, HTTP_OK);
+ if (evhttp_find_header(evhttp_request_get_input_headers(req), "Content-Type") != NULL) {
+ tt_abort_msg("(content type)\n");
+ }
+
+ tt_uint_op(evbuffer_get_length(evhttp_request_get_input_buffer(req)), ==, 0);
+
+ test_ok = 1;
+ end:
+ event_base_loopexit(arg,NULL);
+}
+
+struct http_server
+{
+ ev_uint16_t port;
+ int ssl;
+};
+static struct event_base *http_make_web_server_base=NULL;
+static void
+http_make_web_server(evutil_socket_t fd, short what, void *arg)
+{
+ struct http_server *hs = (struct http_server *)arg;
+ http = http_setup(&hs->port, http_make_web_server_base, hs->ssl ? HTTP_BIND_SSL : 0);
+}
+
+static void
+http_simple_test_impl(void *arg, int ssl, int dirty)
+{
+ struct basic_test_data *data = arg;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct bufferevent *bev;
+ struct http_server hs = { .port = 0, .ssl = ssl, };
+
+ exit_base = data->base;
+ test_ok = 0;
+
+ http = http_setup(&hs.port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+ bev = create_bev(data->base, -1, ssl);
+ evcon = evhttp_connection_base_bufferevent_new(
+ data->base, NULL, bev, "127.0.0.1", hs.port);
+ tt_assert(evcon);
+ evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+ req = evhttp_request_new(http_request_done, (void*) BASIC_REQUEST_BODY);
+ tt_assert(req);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+ tt_int_op(test_ok, ==, 1);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+static void http_simple_test(void *arg)
+{ return http_simple_test_impl(arg, 0, 0); }
+
+static void
+http_connection_retry_test_basic(void *arg, const char *addr, struct evdns_base *dns_base, int ssl)
+{
+ struct basic_test_data *data = arg;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct timeval tv, tv_start, tv_end;
+ struct bufferevent *bev;
+ struct http_server hs = { .port = 0, .ssl = ssl, };
+
+ exit_base = data->base;
+ test_ok = 0;
+
+ /* auto detect a port */
+ http = http_setup(&hs.port, data->base, ssl ? HTTP_BIND_SSL : 0);
+ evhttp_free(http);
+ http = NULL;
+
+ bev = create_bev(data->base, -1, ssl);
+ evcon = evhttp_connection_base_bufferevent_new(data->base, dns_base, bev, addr, hs.port);
+ tt_assert(evcon);
+ if (dns_base)
+ tt_assert(!evhttp_connection_set_flags(evcon, EVHTTP_CON_REUSE_CONNECTED_ADDR));
+
+ evhttp_connection_set_timeout(evcon, 1);
+ /* also bind to local host */
+ evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+ /*
+	 * At this point, we want to schedule an HTTP GET request to the
+	 * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_connection_retry_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/?arg=val") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_base_dispatch(data->base);
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+ tt_int_op(tv_end.tv_sec, <, 1);
+
+ tt_int_op(test_ok, ==, 1);
+
+ /*
+ * now test the same but with retries
+ */
+ test_ok = 0;
+	/** Shut down the DNS server to test conn_address reuse */
+ if (dns_base)
+ regress_clean_dnsserver();
+
+ {
+ const struct timeval tv_timeout = { 0, 500000 };
+ const struct timeval tv_retry = { 0, 500000 };
+ evhttp_connection_set_timeout_tv(evcon, &tv_timeout);
+ evhttp_connection_set_initial_retry_tv(evcon, &tv_retry);
+ }
+ evhttp_connection_set_retries(evcon, 1);
+
+ req = evhttp_request_new(http_connection_retry_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/?arg=val") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_base_dispatch(data->base);
+ evutil_gettimeofday(&tv_end, NULL);
+
+	/* fails fast, waits .5 sec before retrying, then fails fast again. */
+ test_timeval_diff_leq(&tv_start, &tv_end, 500, 200);
+
+ tt_assert(test_ok == 1);
+
+ /*
+ * now test the same but with retries and give it a web server
+ * at the end
+ */
+ test_ok = 0;
+
+ evhttp_connection_set_timeout(evcon, 1);
+ evhttp_connection_set_retries(evcon, 3);
+
+ req = evhttp_request_new(http_dispatcher_test_done, data->base);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET,
+ "/?arg=val") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ /* start up a web server .2 seconds after the connection tried
+ * to send a request
+ */
+ evutil_timerclear(&tv);
+ tv.tv_usec = 200000;
+ http_make_web_server_base = data->base;
+ event_base_once(data->base, -1, EV_TIMEOUT, http_make_web_server, &hs, &tv);
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_base_dispatch(data->base);
+ evutil_gettimeofday(&tv_end, NULL);
+ /* We'll wait twice as long as we did last time. */
+ test_timeval_diff_leq(&tv_start, &tv_end, 1000, 400);
+
+ tt_int_op(test_ok, ==, 1);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_connection_retry_conn_address_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t portnum = 0;
+ struct evdns_base *dns_base = NULL;
+ char address[64];
+
+ tt_assert(regress_dnsserver(data->base, &portnum, search_table));
+ dns_base = evdns_base_new(data->base, 0/* init name servers */);
+ tt_assert(dns_base);
+
+	/* Add ourselves as the only nameserver, and make sure we really are
+ * the only nameserver. */
+ evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum);
+ evdns_base_nameserver_ip_add(dns_base, address);
+
+ http_connection_retry_test_basic(arg, "localhost", dns_base, ssl);
+
+ end:
+ if (dns_base)
+ evdns_base_free(dns_base, 0);
+	/** the DNS server is cleaned up in http_connection_retry_test_basic() */
+}
+static void http_connection_retry_conn_address_test(void *arg)
+{ return http_connection_retry_conn_address_test_impl(arg, 0); }
+
+static void
+http_connection_retry_test_impl(void *arg, int ssl)
+{
+ return http_connection_retry_test_basic(arg, "127.0.0.1", NULL, ssl);
+}
+static void
+http_connection_retry_test(void *arg)
+{ return http_connection_retry_test_impl(arg, 0); }
+
+static void
+http_primitives(void *ptr)
+{
+ char *escaped = NULL;
+ struct evhttp *http = NULL;
+
+ escaped = evhttp_htmlescape("<script>");
+ tt_assert(escaped);
+ tt_str_op(escaped, ==, "&lt;script&gt;");
+ free(escaped);
+
+ escaped = evhttp_htmlescape("\"\'&");
+ tt_assert(escaped);
+ tt_str_op(escaped, ==, "&quot;&#039;&amp;");
+
+ http = evhttp_new(NULL);
+ tt_assert(http);
+ tt_int_op(evhttp_set_cb(http, "/test", http_basic_cb, NULL), ==, 0);
+ tt_int_op(evhttp_set_cb(http, "/test", http_basic_cb, NULL), ==, -1);
+ tt_int_op(evhttp_del_cb(http, "/test"), ==, 0);
+ tt_int_op(evhttp_del_cb(http, "/test"), ==, -1);
+ tt_int_op(evhttp_set_cb(http, "/test", http_basic_cb, NULL), ==, 0);
+
+ end:
+ if (escaped)
+ free(escaped);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_multi_line_header_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev= NULL;
+ evutil_socket_t fd = -1;
+ const char *http_start_request;
+ ev_uint16_t port = 0;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ tt_ptr_op(http, !=, NULL);
+
+ fd = http_connect("127.0.0.1", port);
+
+ tt_int_op(fd, !=, -1);
+
+	/* Quick-and-dirty way to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ tt_ptr_op(bev, !=, NULL);
+ bufferevent_setcb(bev, http_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ http_start_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "X-Multi-Extra-WS: libevent \r\n"
+ "\t\t\t2.1 \r\n"
+ "X-Multi: aaaaaaaa\r\n"
+ " a\r\n"
+ "\tEND\r\n"
+ "X-Last: last\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_start_request, strlen(http_start_request));
+ found_multi = found_multi2 = 0;
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(found_multi, ==, 1);
+ tt_int_op(found_multi2, ==, 1);
+ tt_int_op(test_ok, ==, 4);
+ end:
+ if (bev)
+ bufferevent_free(bev);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_request_bad(struct evhttp_request *req, void *arg)
+{
+ if (req != NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_negative_content_length_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_bad, data->base);
+
+ /* Cause the response to have a negative content-length */
+ evhttp_add_header(evhttp_request_get_output_headers(req), "X-Negative", "makeitso");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+
+static void
+http_data_length_constraints_test_done(struct evhttp_request *req, void *arg)
+{
+ tt_assert(req);
+ tt_int_op(evhttp_request_get_response_code(req), ==, HTTP_BADREQUEST);
+end:
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_large_entity_test_done(struct evhttp_request *req, void *arg)
+{
+ tt_assert(req);
+ tt_int_op(evhttp_request_get_response_code(req), ==, HTTP_ENTITYTOOLARGE);
+end:
+ event_base_loopexit(arg, NULL);
+}
+
+static void
+http_data_length_constraints_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ char long_str[8192];
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ /* also bind to local host */
+ evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+ /*
+	 * At this point, we want to schedule an HTTP GET request to the
+	 * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_data_length_constraints_test_done, data->base);
+ tt_assert(req);
+
+ memset(long_str, 'a', 8192);
+ long_str[8191] = '\0';
+ /* Add the information that we care about */
+ evhttp_set_max_headers_size(http, 8191);
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Longheader", long_str);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/?arg=val") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+ event_base_dispatch(data->base);
+
+ req = evhttp_request_new(http_data_length_constraints_test_done, data->base);
+ tt_assert(req);
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+
+ /* GET /?arg=verylongvalue HTTP/1.1 */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, long_str) == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+ event_base_dispatch(data->base);
+
+ evhttp_set_max_body_size(http, 8190);
+ req = evhttp_request_new(http_data_length_constraints_test_done, data->base);
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+ evbuffer_add_printf(evhttp_request_get_output_buffer(req), "%s", long_str);
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+ event_base_dispatch(data->base);
+
+ req = evhttp_request_new(http_large_entity_test_done, data->base);
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Host", "somehost");
+ evhttp_add_header(evhttp_request_get_output_headers(req), "Expect", "100-continue");
+ evbuffer_add_printf(evhttp_request_get_output_buffer(req), "%s", long_str);
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+ event_base_dispatch(data->base);
+
+ test_ok = 1;
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+/*
+ * Testing client reset of server chunked connections
+ */
+
+struct terminate_state {
+ struct event_base *base;
+ struct evhttp_request *req;
+ struct bufferevent *bev;
+ evutil_socket_t fd;
+ int gotclosecb: 1;
+ int oneshot: 1;
+};
+
+static void
+terminate_chunked_trickle_cb(evutil_socket_t fd, short events, void *arg)
+{
+ struct terminate_state *state = arg;
+ struct evbuffer *evb;
+
+ if (!state->req) {
+ return;
+ }
+
+ if (evhttp_request_get_connection(state->req) == NULL) {
+ test_ok = 1;
+ evhttp_request_free(state->req);
+ event_base_loopexit(state->base,NULL);
+ return;
+ }
+
+ evb = evbuffer_new();
+ evbuffer_add_printf(evb, "%p", evb);
+ evhttp_send_reply_chunk(state->req, evb);
+ evbuffer_free(evb);
+
+ if (!state->oneshot) {
+ struct timeval tv;
+ tv.tv_sec = 0;
+ tv.tv_usec = 3000;
+ EVUTIL_ASSERT(state);
+ EVUTIL_ASSERT(state->base);
+ event_base_once(state->base, -1, EV_TIMEOUT, terminate_chunked_trickle_cb, arg, &tv);
+ }
+}
+
+static void
+terminate_chunked_close_cb(struct evhttp_connection *evcon, void *arg)
+{
+ struct terminate_state *state = arg;
+ state->gotclosecb = 1;
+
+	/** TODO: we could do this unconditionally */
+ if (state->oneshot) {
+ evhttp_request_free(state->req);
+ state->req = NULL;
+ event_base_loopexit(state->base,NULL);
+ }
+}
+
+static void
+terminate_chunked_cb(struct evhttp_request *req, void *arg)
+{
+ struct terminate_state *state = arg;
+ struct timeval tv;
+
+ /* we want to know if this connection closes on us */
+ evhttp_connection_set_closecb(
+ evhttp_request_get_connection(req),
+ terminate_chunked_close_cb, arg);
+
+ state->req = req;
+
+ evhttp_send_reply_start(req, HTTP_OK, "OK");
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 3000;
+ event_base_once(state->base, -1, EV_TIMEOUT, terminate_chunked_trickle_cb, arg, &tv);
+}
+
+static void
+terminate_chunked_client(evutil_socket_t fd, short event, void *arg)
+{
+ struct terminate_state *state = arg;
+ bufferevent_free(state->bev);
+ evutil_closesocket(state->fd);
+}
+
+static void
+terminate_readcb(struct bufferevent *bev, void *arg)
+{
+ /* just drop the data */
+ evbuffer_drain(bufferevent_get_input(bev), -1);
+}
+
+
+static void
+http_terminate_chunked_test_impl(void *arg, int oneshot)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev = NULL;
+ struct timeval tv;
+ const char *http_request;
+ ev_uint16_t port = 0;
+ evutil_socket_t fd = -1;
+ struct terminate_state terminate_state;
+
+ test_ok = 0;
+
+ http = http_setup(&port, data->base, 0);
+ evhttp_del_cb(http, "/test");
+ tt_assert(evhttp_set_cb(http, "/test",
+ terminate_chunked_cb, &terminate_state) == 0);
+
+ fd = http_connect("127.0.0.1", port);
+
+	/* Quick-and-dirty way to send a request */
+ bev = bufferevent_socket_new(data->base, fd, 0);
+ bufferevent_setcb(bev, terminate_readcb, http_writecb,
+ http_errorcb, data->base);
+
+ memset(&terminate_state, 0, sizeof(terminate_state));
+ terminate_state.base = data->base;
+ terminate_state.fd = fd;
+ terminate_state.bev = bev;
+ terminate_state.gotclosecb = 0;
+ terminate_state.oneshot = oneshot;
+
+	/* send the request; the server will start a chunked reply */
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: some\r\n\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+ evutil_timerclear(&tv);
+ tv.tv_usec = 10000;
+ event_base_once(data->base, -1, EV_TIMEOUT, terminate_chunked_client, &terminate_state,
+ &tv);
+
+ event_base_dispatch(data->base);
+
+ if (terminate_state.gotclosecb == 0)
+ test_ok = 0;
+
+ end:
+ if (fd >= 0)
+ evutil_closesocket(fd);
+ if (http)
+ evhttp_free(http);
+}
+static void
+http_terminate_chunked_test(void *arg)
+{
+ http_terminate_chunked_test_impl(arg, 0);
+}
+static void
+http_terminate_chunked_oneshot_test(void *arg)
+{
+ http_terminate_chunked_test_impl(arg, 1);
+}
+
+static struct regress_dns_server_table ipv6_search_table[] = {
+ { "localhost", "AAAA", "::1", 0, 0 },
+ { NULL, NULL, NULL, 0, 0 }
+};
+
+static void
+http_ipv6_for_domain_test_impl(void *arg, int family)
+{
+ struct basic_test_data *data = arg;
+ struct evdns_base *dns_base = NULL;
+ ev_uint16_t portnum = 0;
+ char address[64];
+
+ tt_assert(regress_dnsserver(data->base, &portnum, ipv6_search_table));
+
+ dns_base = evdns_base_new(data->base, 0/* init name servers */);
+ tt_assert(dns_base);
+
+	/* Add ourselves as the only nameserver, and make sure we really are
+ * the only nameserver. */
+ evutil_snprintf(address, sizeof(address), "127.0.0.1:%d", portnum);
+ evdns_base_nameserver_ip_add(dns_base, address);
+
+ http_connection_test_(arg, 0 /* not persistent */, "localhost", dns_base,
+ 1 /* ipv6 */, family);
+
+ end:
+ if (dns_base)
+ evdns_base_free(dns_base, 0);
+ regress_clean_dnsserver();
+}
+static void
+http_ipv6_for_domain_test(void *arg)
+{
+ http_ipv6_for_domain_test_impl(arg, AF_UNSPEC);
+}
+
+static void
+http_request_get_addr_on_close(struct evhttp_connection *evcon, void *arg)
+{
+ const struct sockaddr *storage;
+ char addrbuf[128];
+ char local[] = "127.0.0.1:";
+
+ test_ok = 0;
+ tt_assert(evcon);
+
+ storage = evhttp_connection_get_addr(evcon);
+ tt_assert(storage);
+
+ evutil_format_sockaddr_port_((struct sockaddr *)storage, addrbuf, sizeof(addrbuf));
+ tt_assert(!strncmp(addrbuf, local, sizeof(local) - 1));
+
+ test_ok = 1;
+ return;
+
+end:
+ test_ok = 0;
+}
+
+static void
+http_get_addr_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, 0);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+ evhttp_connection_set_closecb(evcon, http_request_get_addr_on_close, arg);
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_done, (void *)BASIC_REQUEST_BODY);
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+
+ event_base_dispatch(data->base);
+
+ http_request_get_addr_on_close(evcon, NULL);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+http_set_family_test(void *arg)
+{
+ http_connection_test_(arg, 0, "127.0.0.1", NULL, 0, AF_UNSPEC);
+}
+static void
+http_set_family_ipv4_test(void *arg)
+{
+ http_connection_test_(arg, 0, "127.0.0.1", NULL, 0, AF_INET);
+}
+static void
+http_set_family_ipv6_test(void *arg)
+{
+ http_ipv6_for_domain_test_impl(arg, AF_INET6);
+}
+
+static void
+http_write_during_read(evutil_socket_t fd, short what, void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct timeval tv;
+
+ bufferevent_write(bev, "foobar", strlen("foobar"));
+
+ evutil_timerclear(&tv);
+ tv.tv_sec = 1;
+ event_base_loopexit(exit_base, &tv);
+}
+static void
+http_write_during_read_test_impl(void *arg, int ssl)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct bufferevent *bev = NULL;
+ struct timeval tv;
+	evutil_socket_t fd;
+ const char *http_request;
+
+ test_ok = 0;
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, ssl ? HTTP_BIND_SSL : 0);
+
+ fd = http_connect("127.0.0.1", port);
+ bev = create_bev(data->base, fd, 0);
+ bufferevent_setcb(bev, NULL, NULL, NULL, data->base);
+ bufferevent_disable(bev, EV_READ);
+
+ http_request =
+ "GET /large HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+ evutil_timerclear(&tv);
+ tv.tv_usec = 10000;
+ event_base_once(data->base, -1, EV_TIMEOUT, http_write_during_read, bev, &tv);
+
+ event_base_dispatch(data->base);
+
+ if (bev)
+ bufferevent_free(bev);
+ if (http)
+ evhttp_free(http);
+}
+static void http_write_during_read_test(void *arg)
+{ return http_write_during_read_test_impl(arg, 0); }
+
+static void
+http_request_own_test(void *arg)
+{
+ struct basic_test_data *data = arg;
+ ev_uint16_t port = 0;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+ exit_base = data->base;
+
+ http = http_setup(&port, data->base, 0);
+ evhttp_free(http);
+
+ evcon = evhttp_connection_base_new(data->base, NULL, "127.0.0.1", port);
+ tt_assert(evcon);
+
+ req = evhttp_request_new(http_request_no_action_done, NULL);
+
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ tt_abort_msg("Couldn't make request");
+ }
+ evhttp_request_own(req);
+
+ event_base_dispatch(data->base);
+
+ end:
+ if (evcon)
+ evhttp_connection_free(evcon);
+ if (req)
+ evhttp_request_free(req);
+
+ test_ok = 1;
+}
+
+#define HTTP_LEGACY(name) \
+ { #name, run_legacy_test_fn, TT_ISOLATED|TT_LEGACY, &legacy_setup, \
+ http_##name##_test }
+
+#define HTTP(name) \
+ { #name, http_##name##_test, TT_ISOLATED, &basic_setup, NULL }
+#define HTTPS(name) \
+ { "https_" #name, https_##name##_test, TT_ISOLATED, &basic_setup, NULL }
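+/* For example, HTTP(basic) expands to
+ *	{ "basic", http_basic_test, TT_ISOLATED, &basic_setup, NULL }
+ * and HTTPS(basic) registers the same kind of entry under "https_basic". */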
+
+#ifdef EVENT__HAVE_OPENSSL
+static void https_basic_test(void *arg)
+{ return http_basic_test_impl(arg, 1); }
+static void https_incomplete_test(void *arg)
+{ http_incomplete_test_(arg, 0, 1); }
+static void https_incomplete_timeout_test(void *arg)
+{ http_incomplete_test_(arg, 1, 1); }
+static void https_simple_test(void *arg)
+{ return http_simple_test_impl(arg, 1, 0); }
+static void https_simple_dirty_test(void *arg)
+{ return http_simple_test_impl(arg, 1, 1); }
+static void https_connection_retry_conn_address_test(void *arg)
+{ return http_connection_retry_conn_address_test_impl(arg, 1); }
+static void https_connection_retry_test(void *arg)
+{ return http_connection_retry_test_impl(arg, 1); }
+static void https_chunk_out_test(void *arg)
+{ return http_chunk_out_test_impl(arg, 1); }
+static void https_stream_out_test(void *arg)
+{ return http_stream_out_test_impl(arg, 1); }
+static void https_connection_fail_test(void *arg)
+{ return http_connection_fail_test_impl(arg, 1); }
+static void https_write_during_read_test(void *arg)
+{ return http_write_during_read_test_impl(arg, 1); }
+#endif
+
+struct testcase_t http_testcases[] = {
+ { "primitives", http_primitives, 0, NULL, NULL },
+ { "base", http_base_test, TT_FORK, NULL, NULL },
+ { "bad_headers", http_bad_header_test, 0, NULL, NULL },
+ { "parse_query", http_parse_query_test, 0, NULL, NULL },
+ { "parse_uri", http_parse_uri_test, 0, NULL, NULL },
+ { "parse_uri_nc", http_parse_uri_test, 0, &basic_setup, (void*)"nc" },
+ { "uriencode", http_uriencode_test, 0, NULL, NULL },
+ HTTP(basic),
+ HTTP(simple),
+ HTTP(cancel),
+ HTTP(virtual_host),
+ HTTP(post),
+ HTTP(put),
+ HTTP(delete),
+ HTTP(allowed_methods),
+ HTTP(failure),
+ HTTP(connection),
+ HTTP(persist_connection),
+ HTTP(autofree_connection),
+ HTTP(connection_async),
+ HTTP(close_detection),
+ HTTP(close_detection_delay),
+ HTTP(bad_request),
+ HTTP(incomplete),
+ HTTP(incomplete_timeout),
+ HTTP(terminate_chunked),
+ HTTP(terminate_chunked_oneshot),
+ HTTP(on_complete),
+
+ HTTP(highport),
+ HTTP(dispatcher),
+ HTTP(multi_line_header),
+ HTTP(negative_content_length),
+ HTTP(chunk_out),
+ HTTP(stream_out),
+
+ HTTP(stream_in),
+ HTTP(stream_in_cancel),
+
+ HTTP(connection_fail),
+ { "connection_retry", http_connection_retry_test, TT_ISOLATED|TT_OFF_BY_DEFAULT, &basic_setup, NULL },
+ { "connection_retry_conn_address", http_connection_retry_conn_address_test,
+ TT_ISOLATED|TT_OFF_BY_DEFAULT, &basic_setup, NULL },
+
+ HTTP(data_length_constraints),
+
+ HTTP(ipv6_for_domain),
+ HTTP(get_addr),
+
+ HTTP(set_family),
+ HTTP(set_family_ipv4),
+ HTTP(set_family_ipv6),
+
+ HTTP(write_during_read),
+ HTTP(request_own),
+
+#ifdef EVENT__HAVE_OPENSSL
+ HTTPS(basic),
+ HTTPS(simple),
+ HTTPS(simple_dirty),
+ HTTPS(incomplete),
+ HTTPS(incomplete_timeout),
+ { "https_connection_retry", https_connection_retry_test, TT_ISOLATED|TT_OFF_BY_DEFAULT, &basic_setup, NULL },
+ { "https_connection_retry_conn_address", https_connection_retry_conn_address_test,
+ TT_ISOLATED|TT_OFF_BY_DEFAULT, &basic_setup, NULL },
+ HTTPS(chunk_out),
+ HTTPS(stream_out),
+ HTTPS(connection_fail),
+ HTTPS(write_during_read),
+#endif
+
+ END_OF_TESTCASES
+};
+
diff --git a/libs/libevent/docs/test/regress_iocp.c b/libs/libevent/docs/test/regress_iocp.c
new file mode 100644
index 0000000000..17b385241f
--- /dev/null
+++ b/libs/libevent/docs/test/regress_iocp.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include "event2/event.h"
+#include "event2/thread.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/bufferevent.h"
+
+#include <winsock2.h>
+#include <ws2tcpip.h>
+
+#include "regress.h"
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <winsock2.h>
+#undef WIN32_LEAN_AND_MEAN
+
+#include "iocp-internal.h"
+#include "evbuffer-internal.h"
+#include "evthread-internal.h"
+
+/* FIXME remove these ones */
+#include <sys/queue.h>
+#include "event2/event_struct.h"
+#include "event-internal.h"
+
+#define MAX_CALLS 16
+
+static void *count_lock = NULL, *count_cond = NULL;
+static int count = 0;
+
+static void
+count_init(void)
+{
+ EVTHREAD_ALLOC_LOCK(count_lock, 0);
+ EVTHREAD_ALLOC_COND(count_cond);
+
+ tt_assert(count_lock);
+ tt_assert(count_cond);
+
+end:
+ ;
+}
+
+static void
+count_free(void)
+{
+ EVTHREAD_FREE_LOCK(count_lock, 0);
+ EVTHREAD_FREE_COND(count_cond);
+}
+
+static void
+count_incr(void)
+{
+ EVLOCK_LOCK(count_lock, 0);
+ count++;
+ EVTHREAD_COND_BROADCAST(count_cond);
+ EVLOCK_UNLOCK(count_lock, 0);
+}
+
+static int
+count_wait_for(int i, int ms)
+{
+ struct timeval tv;
+ DWORD elapsed;
+ int rv = -1;
+
+ EVLOCK_LOCK(count_lock, 0);
+ while (ms > 0 && count != i) {
+ tv.tv_sec = 0;
+ tv.tv_usec = ms * 1000;
+ elapsed = GetTickCount();
+ EVTHREAD_COND_WAIT_TIMED(count_cond, count_lock, &tv);
+ elapsed = GetTickCount() - elapsed;
+ ms -= elapsed;
+ }
+ if (count == i)
+ rv = 0;
+ EVLOCK_UNLOCK(count_lock, 0);
+
+ return rv;
+}
+
+struct dummy_overlapped {
+ struct event_overlapped eo;
+ void *lock;
+ int call_count;
+ uintptr_t keys[MAX_CALLS];
+ ev_ssize_t sizes[MAX_CALLS];
+};
+
+static void
+dummy_cb(struct event_overlapped *o, uintptr_t key, ev_ssize_t n, int ok)
+{
+ struct dummy_overlapped *d_o =
+ EVUTIL_UPCAST(o, struct dummy_overlapped, eo);
+
+ EVLOCK_LOCK(d_o->lock, 0);
+ if (d_o->call_count < MAX_CALLS) {
+ d_o->keys[d_o->call_count] = key;
+ d_o->sizes[d_o->call_count] = n;
+ }
+ d_o->call_count++;
+ EVLOCK_UNLOCK(d_o->lock, 0);
+
+ count_incr();
+}
+
+static int
+pair_is_in(struct dummy_overlapped *o, uintptr_t key, ev_ssize_t n)
+{
+ int i;
+ int result = 0;
+ EVLOCK_LOCK(o->lock, 0);
+ for (i=0; i < o->call_count; ++i) {
+ if (o->keys[i] == key && o->sizes[i] == n) {
+ result = 1;
+ break;
+ }
+ }
+ EVLOCK_UNLOCK(o->lock, 0);
+ return result;
+}
+
+static void
+test_iocp_port(void *ptr)
+{
+ struct event_iocp_port *port = NULL;
+ struct dummy_overlapped o1, o2;
+
+ memset(&o1, 0, sizeof(o1));
+ memset(&o2, 0, sizeof(o2));
+
+ count_init();
+ EVTHREAD_ALLOC_LOCK(o1.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVTHREAD_ALLOC_LOCK(o2.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ tt_assert(o1.lock);
+ tt_assert(o2.lock);
+
+ event_overlapped_init_(&o1.eo, dummy_cb);
+ event_overlapped_init_(&o2.eo, dummy_cb);
+
+ port = event_iocp_port_launch_(0);
+ tt_assert(port);
+
+ tt_assert(!event_iocp_activate_overlapped_(port, &o1.eo, 10, 100));
+ tt_assert(!event_iocp_activate_overlapped_(port, &o2.eo, 20, 200));
+
+ tt_assert(!event_iocp_activate_overlapped_(port, &o1.eo, 11, 101));
+ tt_assert(!event_iocp_activate_overlapped_(port, &o2.eo, 21, 201));
+
+ tt_assert(!event_iocp_activate_overlapped_(port, &o1.eo, 12, 102));
+ tt_assert(!event_iocp_activate_overlapped_(port, &o2.eo, 22, 202));
+
+ tt_assert(!event_iocp_activate_overlapped_(port, &o1.eo, 13, 103));
+ tt_assert(!event_iocp_activate_overlapped_(port, &o2.eo, 23, 203));
+
+ tt_int_op(count_wait_for(8, 2000), ==, 0);
+
+ tt_want(!event_iocp_shutdown_(port, 2000));
+
+ tt_int_op(o1.call_count, ==, 4);
+ tt_int_op(o2.call_count, ==, 4);
+
+ tt_want(pair_is_in(&o1, 10, 100));
+ tt_want(pair_is_in(&o1, 11, 101));
+ tt_want(pair_is_in(&o1, 12, 102));
+ tt_want(pair_is_in(&o1, 13, 103));
+
+ tt_want(pair_is_in(&o2, 20, 200));
+ tt_want(pair_is_in(&o2, 21, 201));
+ tt_want(pair_is_in(&o2, 22, 202));
+ tt_want(pair_is_in(&o2, 23, 203));
+
+end:
+ EVTHREAD_FREE_LOCK(o1.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVTHREAD_FREE_LOCK(o2.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ count_free();
+}
+
+static struct evbuffer *rbuf = NULL, *wbuf = NULL;
+
+static void
+read_complete(struct event_overlapped *eo, uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ tt_assert(ok);
+ evbuffer_commit_read_(rbuf, nbytes);
+ count_incr();
+end:
+ ;
+}
+
+static void
+write_complete(struct event_overlapped *eo, uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ tt_assert(ok);
+ evbuffer_commit_write_(wbuf, nbytes);
+ count_incr();
+end:
+ ;
+}
+
+static void
+test_iocp_evbuffer(void *ptr)
+{
+ struct event_overlapped rol, wol;
+ struct basic_test_data *data = ptr;
+ struct event_iocp_port *port = NULL;
+ struct evbuffer *buf=NULL;
+ struct evbuffer_chain *chain;
+ char junk[1024];
+ int i;
+
+ count_init();
+ event_overlapped_init_(&rol, read_complete);
+ event_overlapped_init_(&wol, write_complete);
+
+ for (i = 0; i < (int)sizeof(junk); ++i)
+ junk[i] = (char)(i);
+
+ rbuf = evbuffer_overlapped_new_(data->pair[0]);
+ wbuf = evbuffer_overlapped_new_(data->pair[1]);
+ evbuffer_enable_locking(rbuf, NULL);
+ evbuffer_enable_locking(wbuf, NULL);
+
+ port = event_iocp_port_launch_(0);
+ tt_assert(port);
+ tt_assert(rbuf);
+ tt_assert(wbuf);
+
+ tt_assert(!event_iocp_port_associate_(port, data->pair[0], 100));
+ tt_assert(!event_iocp_port_associate_(port, data->pair[1], 100));
+
+ for (i=0;i<10;++i)
+ evbuffer_add(wbuf, junk, sizeof(junk));
+
+ buf = evbuffer_new();
+ tt_assert(buf != NULL);
+ evbuffer_add(rbuf, junk, sizeof(junk));
+ tt_assert(!evbuffer_launch_read_(rbuf, 2048, &rol));
+ evbuffer_add_buffer(buf, rbuf);
+ tt_int_op(evbuffer_get_length(buf), ==, sizeof(junk));
+ for (chain = buf->first; chain; chain = chain->next)
+ tt_int_op(chain->flags & EVBUFFER_MEM_PINNED_ANY, ==, 0);
+ tt_assert(!evbuffer_get_length(rbuf));
+ tt_assert(!evbuffer_launch_write_(wbuf, 512, &wol));
+
+ tt_int_op(count_wait_for(2, 2000), ==, 0);
+
+ tt_int_op(evbuffer_get_length(rbuf),==,512);
+
+ /* FIXME Actually test some stuff here. */
+
+ tt_want(!event_iocp_shutdown_(port, 2000));
+end:
+ count_free();
+ evbuffer_free(rbuf);
+ evbuffer_free(wbuf);
+ if (buf) evbuffer_free(buf);
+}
+
+static int got_readcb = 0;
+
+static void
+async_readcb(struct bufferevent *bev, void *arg)
+{
+ /* Disabling read should cause the loop to quit */
+ bufferevent_disable(bev, EV_READ);
+ got_readcb++;
+}
+
+static void
+test_iocp_bufferevent_async(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ struct event_iocp_port *port = NULL;
+ struct bufferevent *bea1=NULL, *bea2=NULL;
+ char buf[128];
+ size_t n;
+
+ event_base_start_iocp_(data->base, 0);
+ port = event_base_get_iocp_(data->base);
+ tt_assert(port);
+
+ bea1 = bufferevent_async_new_(data->base, data->pair[0],
+ BEV_OPT_DEFER_CALLBACKS);
+ bea2 = bufferevent_async_new_(data->base, data->pair[1],
+ BEV_OPT_DEFER_CALLBACKS);
+ tt_assert(bea1);
+ tt_assert(bea2);
+
+ bufferevent_setcb(bea2, async_readcb, NULL, NULL, NULL);
+ bufferevent_enable(bea1, EV_WRITE);
+ bufferevent_enable(bea2, EV_READ);
+
+ bufferevent_write(bea1, "Hello world", strlen("Hello world")+1);
+
+ event_base_dispatch(data->base);
+
+ tt_int_op(got_readcb, ==, 1);
+ n = bufferevent_read(bea2, buf, sizeof(buf)-1);
+ buf[n]='\0';
+ tt_str_op(buf, ==, "Hello world");
+
+end:
+ bufferevent_free(bea1);
+ bufferevent_free(bea2);
+}
+
+
+struct testcase_t iocp_testcases[] = {
+ { "port", test_iocp_port, TT_FORK|TT_NEED_THREADS, &basic_setup, NULL },
+ { "evbuffer", test_iocp_evbuffer,
+ TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_THREADS,
+ &basic_setup, NULL },
+ { "bufferevent_async", test_iocp_bufferevent_async,
+ TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_THREADS|TT_NEED_BASE,
+ &basic_setup, NULL },
+ END_OF_TESTCASES
+};
diff --git a/libs/libevent/docs/test/regress_listener.c b/libs/libevent/docs/test/regress_listener.c
new file mode 100644
index 0000000000..4db102df68
--- /dev/null
+++ b/libs/libevent/docs/test/regress_listener.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#include <sys/types.h>
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <netinet/in.h>
+# ifdef _XOPEN_SOURCE_EXTENDED
+# include <arpa/inet.h>
+# endif
+#include <unistd.h>
+#endif
+
+#include <string.h>
+
+#include "event2/listener.h"
+#include "event2/event.h"
+#include "event2/util.h"
+
+#include "regress.h"
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+static void
+acceptcb(struct evconnlistener *listener, evutil_socket_t fd,
+ struct sockaddr *addr, int socklen, void *arg)
+{
+ int *ptr = arg;
+ --*ptr;
+ TT_BLATHER(("Got one for %p", ptr));
+ evutil_closesocket(fd);
+
+ if (! *ptr)
+ evconnlistener_disable(listener);
+}
+
+static void
+regress_pick_a_port(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evconnlistener *listener1 = NULL, *listener2 = NULL;
+ struct sockaddr_in sin;
+ int count1 = 2, count2 = 1;
+ struct sockaddr_storage ss1, ss2;
+ struct sockaddr_in *sin1, *sin2;
+ ev_socklen_t slen1 = sizeof(ss1), slen2 = sizeof(ss2);
+ unsigned int flags =
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC;
+
+ evutil_socket_t fd1 = -1, fd2 = -1, fd3 = -1;
+
+ if (data->setup_data && strstr((char*)data->setup_data, "ts")) {
+ flags |= LEV_OPT_THREADSAFE;
+ }
+
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
+ sin.sin_port = 0; /* "You pick!" */
+
+ listener1 = evconnlistener_new_bind(base, acceptcb, &count1,
+ flags, -1, (struct sockaddr *)&sin, sizeof(sin));
+ tt_assert(listener1);
+ listener2 = evconnlistener_new_bind(base, acceptcb, &count2,
+ flags, -1, (struct sockaddr *)&sin, sizeof(sin));
+ tt_assert(listener2);
+
+ tt_int_op(evconnlistener_get_fd(listener1), >=, 0);
+ tt_int_op(evconnlistener_get_fd(listener2), >=, 0);
+ tt_assert(getsockname(evconnlistener_get_fd(listener1),
+ (struct sockaddr*)&ss1, &slen1) == 0);
+ tt_assert(getsockname(evconnlistener_get_fd(listener2),
+ (struct sockaddr*)&ss2, &slen2) == 0);
+ tt_int_op(ss1.ss_family, ==, AF_INET);
+ tt_int_op(ss2.ss_family, ==, AF_INET);
+
+ sin1 = (struct sockaddr_in*)&ss1;
+ sin2 = (struct sockaddr_in*)&ss2;
+ tt_int_op(ntohl(sin1->sin_addr.s_addr), ==, 0x7f000001);
+ tt_int_op(ntohl(sin2->sin_addr.s_addr), ==, 0x7f000001);
+ tt_int_op(sin1->sin_port, !=, sin2->sin_port);
+
+ tt_ptr_op(evconnlistener_get_base(listener1), ==, base);
+ tt_ptr_op(evconnlistener_get_base(listener2), ==, base);
+
+ fd1 = fd2 = fd3 = -1;
+ evutil_socket_connect_(&fd1, (struct sockaddr*)&ss1, slen1);
+ evutil_socket_connect_(&fd2, (struct sockaddr*)&ss1, slen1);
+ evutil_socket_connect_(&fd3, (struct sockaddr*)&ss2, slen2);
+
+#ifdef _WIN32
+ Sleep(100); /* XXXX this is a stupid stopgap. */
+#endif
+ event_base_dispatch(base);
+
+ tt_int_op(count1, ==, 0);
+ tt_int_op(count2, ==, 0);
+
+end:
+ if (fd1>=0)
+ EVUTIL_CLOSESOCKET(fd1);
+ if (fd2>=0)
+ EVUTIL_CLOSESOCKET(fd2);
+ if (fd3>=0)
+ EVUTIL_CLOSESOCKET(fd3);
+ if (listener1)
+ evconnlistener_free(listener1);
+ if (listener2)
+ evconnlistener_free(listener2);
+}
+
+static void
+errorcb(struct evconnlistener *lis, void *data_)
+{
+ int *data = data_;
+ *data = 1000;
+ evconnlistener_disable(lis);
+}
+
+static void
+regress_listener_error(void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ struct evconnlistener *listener = NULL;
+ int count = 1;
+ unsigned int flags = LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE;
+
+ if (data->setup_data && strstr((char*)data->setup_data, "ts")) {
+ flags |= LEV_OPT_THREADSAFE;
+ }
+
+	/* send, so that pair[0] will look 'readable' */
+ tt_int_op(send(data->pair[1], "hello", 5, 0), >, 0);
+
+ /* Start a listener with a bogus socket. */
+ listener = evconnlistener_new(base, acceptcb, &count,
+ flags, 0,
+ data->pair[0]);
+ tt_assert(listener);
+
+ evconnlistener_set_error_cb(listener, errorcb);
+
+ tt_assert(listener);
+
+ event_base_dispatch(base);
+ tt_int_op(count,==,1000); /* set by error cb */
+
+end:
+ if (listener)
+ evconnlistener_free(listener);
+}
+
+struct testcase_t listener_testcases[] = {
+
+ { "randport", regress_pick_a_port, TT_FORK|TT_NEED_BASE,
+ &basic_setup, NULL},
+
+ { "randport_ts", regress_pick_a_port, TT_FORK|TT_NEED_BASE,
+ &basic_setup, (char*)"ts"},
+
+ { "error", regress_listener_error,
+ TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR,
+ &basic_setup, NULL},
+
+ { "error_ts", regress_listener_error,
+ TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR,
+ &basic_setup, (char*)"ts"},
+
+ END_OF_TESTCASES,
+};
+
+struct testcase_t listener_iocp_testcases[] = {
+ { "randport", regress_pick_a_port,
+ TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP,
+ &basic_setup, NULL},
+
+ { "error", regress_listener_error,
+ TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR|TT_ENABLE_IOCP,
+ &basic_setup, NULL},
+
+ END_OF_TESTCASES,
+};
diff --git a/libs/libevent/docs/test/regress_main.c b/libs/libevent/docs/test/regress_main.c
new file mode 100644
index 0000000000..6d045bb833
--- /dev/null
+++ b/libs/libevent/docs/test/regress_main.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <io.h>
+#include <fcntl.h>
+#endif
+
+#if defined(__APPLE__) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__)
+#if (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1060 && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070)
+#define FORK_BREAKS_GCOV
+#include <vproc.h>
+#endif
+#endif
+
+#include "event2/event-config.h"
+
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+#if 0
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <signal.h>
+#include <errno.h>
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/dns.h"
+#include "event2/dns_compat.h"
+#include "event2/thread.h"
+
+#include "event2/event-config.h"
+#include "regress.h"
+#include "tinytest.h"
+#include "tinytest_macros.h"
+#include "../iocp-internal.h"
+#include "../event-internal.h"
+
+struct evutil_weakrand_state test_weakrand_state;
+
+long
+timeval_msec_diff(const struct timeval *start, const struct timeval *end)
+{
+ long ms = end->tv_sec - start->tv_sec;
+ ms *= 1000;
+ ms += ((end->tv_usec - start->tv_usec)+500) / 1000;
+ return ms;
+}
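/* Illustrative note: the +500 above rounds the microsecond remainder to the
 * nearest millisecond, so e.g. a difference of 1 second and 1600 usec comes
 * back as 1002 ms rather than truncating to 1001 ms. */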
+
+/* ============================================================ */
+/* Code to wrap up old legacy test cases that used setup() and cleanup().
+ *
+ * Not all of the tests designated "legacy" are ones that used setup() and
+ * cleanup(), of course. A test is legacy if it uses setup()/cleanup(), OR
+ * if it wants to find its event base/socketpair in global variables (ugh),
+ * OR if it wants to communicate success/failure through test_ok.
+ */
+
+/* This is set to true if we're inside a legacy test wrapper. It lets the
+ setup() and cleanup() functions in regress.c know they're not needed.
+ */
+int in_legacy_test_wrapper = 0;
+
+static void dnslogcb(int w, const char *m)
+{
+ TT_BLATHER(("%s", m));
+}
+
+/* creates a temporary file with the data in it. If *filename_out gets set,
+ * the caller should try to unlink it. */
+int
+regress_make_tmpfile(const void *data, size_t datalen, char **filename_out)
+{
+#ifndef _WIN32
+ char tmpfilename[32];
+ int fd;
+ *filename_out = NULL;
+ strcpy(tmpfilename, "/tmp/eventtmp.XXXXXX");
+#ifdef EVENT__HAVE_UMASK
+ umask(0077);
+#endif
+ fd = mkstemp(tmpfilename);
+ if (fd == -1)
+ return (-1);
+ if (write(fd, data, datalen) != (int)datalen) {
+ close(fd);
+ return (-1);
+ }
+ lseek(fd, 0, SEEK_SET);
+ /* remove it from the file system */
+ unlink(tmpfilename);
+ return (fd);
+#else
+ /* XXXX actually delete the file later */
+ char tmpfilepath[MAX_PATH];
+ char tmpfilename[MAX_PATH];
+ DWORD r, written;
+ int tries = 16;
+ HANDLE h;
+ r = GetTempPathA(MAX_PATH, tmpfilepath);
+ if (r > MAX_PATH || r == 0)
+ return (-1);
+ for (; tries > 0; --tries) {
+ r = GetTempFileNameA(tmpfilepath, "LIBEVENT", 0, tmpfilename);
+ if (r == 0)
+ return (-1);
+ h = CreateFileA(tmpfilename, GENERIC_READ|GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (h != INVALID_HANDLE_VALUE)
+ break;
+ }
+ if (tries == 0)
+ return (-1);
+ written = 0;
+ *filename_out = strdup(tmpfilename);
+ WriteFile(h, data, (DWORD)datalen, &written, NULL);
+ /* Closing the fd returned by this function will indeed close h. */
+ return _open_osfhandle((intptr_t)h,_O_RDONLY);
+#endif
+}
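/* Illustrative sketch, not from the upstream sources: typical caller-side
 * handling of regress_make_tmpfile().  The helper below is hypothetical and
 * only shows the contract: use the returned fd, and if *filename_out was set
 * (the _WIN32 path), unlink and free the name. */
#if 0
static void
tmpfile_usage_sketch(void)
{
	char buf[6];
	char *name = NULL;
	int fd = regress_make_tmpfile("hello", 5, &name);
	if (fd >= 0) {
		/* The data has already been written; on the POSIX path the
		 * offset is rewound so it can be read straight back. */
		(void) read(fd, buf, sizeof(buf));
		close(fd);
	}
	if (name) {
		unlink(name);	/* only set on the _WIN32 code path */
		free(name);
	}
}
#endif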
+
+#ifndef _WIN32
+pid_t
+regress_fork(void)
+{
+ pid_t pid = fork();
+#ifdef FORK_BREAKS_GCOV
+ vproc_transaction_begin(0);
+#endif
+ return pid;
+}
+#endif
+
+static void
+ignore_log_cb(int s, const char *msg)
+{
+}
+
+static void *
+basic_test_setup(const struct testcase_t *testcase)
+{
+ struct event_base *base = NULL;
+ evutil_socket_t spair[2] = { -1, -1 };
+ struct basic_test_data *data = NULL;
+
+#ifndef _WIN32
+ if (testcase->flags & TT_ENABLE_IOCP_FLAG)
+ return (void*)TT_SKIP;
+#endif
+
+ if (testcase->flags & TT_NEED_THREADS) {
+ if (!(testcase->flags & TT_FORK))
+ return NULL;
+#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
+ if (evthread_use_pthreads())
+ exit(1);
+#elif defined(EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED)
+ if (evthread_use_windows_threads())
+ exit(1);
+#else
+ return (void*)TT_SKIP;
+#endif
+ }
+
+ if (testcase->flags & TT_NEED_SOCKETPAIR) {
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, spair) == -1) {
+ fprintf(stderr, "%s: socketpair\n", __func__);
+ exit(1);
+ }
+
+ if (evutil_make_socket_nonblocking(spair[0]) == -1) {
+ fprintf(stderr, "fcntl(O_NONBLOCK)");
+ exit(1);
+ }
+
+ if (evutil_make_socket_nonblocking(spair[1]) == -1) {
+ fprintf(stderr, "fcntl(O_NONBLOCK)");
+ exit(1);
+ }
+ }
+ if (testcase->flags & TT_NEED_BASE) {
+ if (testcase->flags & TT_LEGACY)
+ base = event_init();
+ else
+ base = event_base_new();
+ if (!base)
+ exit(1);
+ }
+ if (testcase->flags & TT_ENABLE_IOCP_FLAG) {
+ if (event_base_start_iocp_(base, 0)<0) {
+ event_base_free(base);
+ return (void*)TT_SKIP;
+ }
+ }
+
+ if (testcase->flags & TT_NEED_DNS) {
+ evdns_set_log_fn(dnslogcb);
+ if (evdns_init())
+ return NULL; /* fast failure */ /*XXX asserts. */
+ }
+
+ if (testcase->flags & TT_NO_LOGS)
+ event_set_log_callback(ignore_log_cb);
+
+ data = calloc(1, sizeof(*data));
+ if (!data)
+ exit(1);
+ data->base = base;
+ data->pair[0] = spair[0];
+ data->pair[1] = spair[1];
+ data->setup_data = testcase->setup_data;
+ return data;
+}
+
+static int
+basic_test_cleanup(const struct testcase_t *testcase, void *ptr)
+{
+ struct basic_test_data *data = ptr;
+
+ if (testcase->flags & TT_NO_LOGS)
+ event_set_log_callback(NULL);
+
+ if (testcase->flags & TT_NEED_SOCKETPAIR) {
+ if (data->pair[0] != -1)
+ evutil_closesocket(data->pair[0]);
+ if (data->pair[1] != -1)
+ evutil_closesocket(data->pair[1]);
+ }
+
+ if (testcase->flags & TT_NEED_DNS) {
+ evdns_shutdown(0);
+ }
+
+ if (testcase->flags & TT_NEED_BASE) {
+ if (data->base) {
+ event_base_assert_ok_(data->base);
+ event_base_free(data->base);
+ }
+ }
+
+ if (testcase->flags & TT_FORK)
+ libevent_global_shutdown();
+
+ free(data);
+
+ return 1;
+}
+
+const struct testcase_setup_t basic_setup = {
+ basic_test_setup, basic_test_cleanup
+};
+
+/* The "data" for a legacy test is just a pointer to the void fn(void)
+ function implementing the test case. We need to set up some globals,
+ though, since that's where legacy tests expect to find a socketpair
+ (sometimes) and a global event_base (sometimes).
+ */
+static void *
+legacy_test_setup(const struct testcase_t *testcase)
+{
+ struct basic_test_data *data = basic_test_setup(testcase);
+ if (data == (void*)TT_SKIP || data == NULL)
+ return data;
+ global_base = data->base;
+ pair[0] = data->pair[0];
+ pair[1] = data->pair[1];
+ data->legacy_test_fn = testcase->setup_data;
+ return data;
+}
+
+/* This function is the implementation of every legacy test case. It
+ sets test_ok to 0, invokes the test function, and tells tinytest that
+ the test failed if the test didn't set test_ok to 1.
+ */
+void
+run_legacy_test_fn(void *ptr)
+{
+ struct basic_test_data *data = ptr;
+ test_ok = called = 0;
+
+ in_legacy_test_wrapper = 1;
+ data->legacy_test_fn(); /* This part actually calls the test */
+ in_legacy_test_wrapper = 0;
+
+ if (!test_ok)
+ tt_abort_msg("Legacy unit test failed");
+
+end:
+ test_ok = 0;
+}
+
+/* This function doesn't have to clean up ptr (which is just a pointer
+ to the test function), but it may need to close the socketpair or
+ free the event_base.
+ */
+static int
+legacy_test_cleanup(const struct testcase_t *testcase, void *ptr)
+{
+ int r = basic_test_cleanup(testcase, ptr);
+ pair[0] = pair[1] = -1;
+ global_base = NULL;
+ return r;
+}
+
+const struct testcase_setup_t legacy_setup = {
+ legacy_test_setup, legacy_test_cleanup
+};
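/* Illustrative sketch, not from the upstream sources: how a hypothetical
 * legacy test would be wired through legacy_setup.  The test function and
 * table below are invented for illustration; the pattern mirrors the
 * RPC_LEGACY macro used in regress_rpc.c. */
#if 0
static void
my_old_style_test(void)
{
	/* Legacy tests report success through the global test_ok. */
	test_ok = 1;
}

struct testcase_t example_legacy_testcases[] = {
	{ "old_style", run_legacy_test_fn,
	  TT_FORK|TT_NEED_BASE|TT_LEGACY, &legacy_setup,
	  (void *)my_old_style_test },
	END_OF_TESTCASES
};
#endif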
+
+/* ============================================================ */
+
+#if (!defined(EVENT__HAVE_PTHREADS) && !defined(_WIN32)) || defined(EVENT__DISABLE_THREAD_SUPPORT)
+struct testcase_t thread_testcases[] = {
+ { "basic", NULL, TT_SKIP, NULL, NULL },
+ END_OF_TESTCASES
+};
+#endif
+
+struct testgroup_t testgroups[] = {
+ { "main/", main_testcases },
+ { "heap/", minheap_testcases },
+ { "et/", edgetriggered_testcases },
+ { "finalize/", finalize_testcases },
+ { "evbuffer/", evbuffer_testcases },
+ { "signal/", signal_testcases },
+ { "util/", util_testcases },
+ { "bufferevent/", bufferevent_testcases },
+ { "http/", http_testcases },
+ { "dns/", dns_testcases },
+ { "evtag/", evtag_testcases },
+ { "rpc/", rpc_testcases },
+ { "thread/", thread_testcases },
+ { "listener/", listener_testcases },
+#ifdef _WIN32
+ { "iocp/", iocp_testcases },
+ { "iocp/bufferevent/", bufferevent_iocp_testcases },
+ { "iocp/listener/", listener_iocp_testcases },
+#endif
+#ifdef EVENT__HAVE_OPENSSL
+ { "ssl/", ssl_testcases },
+#endif
+ END_OF_GROUPS
+};
+
+const char *alltests[] = { "+..", NULL };
+const char *livenettests[] = {
+ "+util/getaddrinfo_live",
+ "+dns/gethostby..",
+ "+dns/resolve_reverse",
+ NULL
+};
+const char *finetimetests[] = {
+ "+util/monotonic_res_precise",
+ "+util/monotonic_res_fallback",
+ "+thread/deferred_cb_skew",
+ "+http/connection_retry",
+ "+http/https_connection_retry",
+ NULL
+};
+struct testlist_alias_t testaliases[] = {
+ { "all", alltests },
+ { "live_net", livenettests },
+ { "fine_timing", finetimetests },
+ END_OF_ALIASES
+};
+
+int libevent_tests_running_in_debug_mode = 0;
+
+int
+main(int argc, const char **argv)
+{
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+#ifndef _WIN32
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return 1;
+#endif
+
+#ifdef _WIN32
+ tinytest_skip(testgroups, "http/connection_retry");
+ tinytest_skip(testgroups, "http/https_connection_retry");
+#endif
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (!getenv("EVENT_NO_DEBUG_LOCKS"))
+ evthread_enable_lock_debugging();
+#endif
+
+ if (getenv("EVENT_DEBUG_MODE")) {
+ event_enable_debug_mode();
+ libevent_tests_running_in_debug_mode = 1;
+ }
+ if (getenv("EVENT_DEBUG_LOGGING_ALL")) {
+ event_enable_debug_logging(EVENT_DBG_ALL);
+ }
+
+ tinytest_set_aliases(testaliases);
+
+ evutil_weakrand_seed_(&test_weakrand_state, 0);
+
+ if (tinytest_main(argc,argv,testgroups))
+ return 1;
+
+ libevent_global_shutdown();
+
+ return 0;
+}
+
diff --git a/libs/libevent/docs/test/regress_minheap.c b/libs/libevent/docs/test/regress_minheap.c
new file mode 100644
index 0000000000..05db32e26f
--- /dev/null
+++ b/libs/libevent/docs/test/regress_minheap.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../minheap-internal.h"
+
+#include <stdlib.h>
+#include "event2/event_struct.h"
+
+#include "tinytest.h"
+#include "tinytest_macros.h"
+#include "regress.h"
+
+static void
+set_random_timeout(struct event *ev)
+{
+ ev->ev_timeout.tv_sec = test_weakrand();
+ ev->ev_timeout.tv_usec = test_weakrand() & 0xfffff;
+ ev->ev_timeout_pos.min_heap_idx = -1;
+}
+
+static void
+check_heap(struct min_heap *heap)
+{
+ unsigned i;
+ for (i = 1; i < heap->n; ++i) {
+ unsigned parent_idx = (i-1)/2;
+ tt_want(evutil_timercmp(&heap->p[i]->ev_timeout,
+ &heap->p[parent_idx]->ev_timeout, >=));
+ }
+}
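/* Illustrative note: with the heap laid out in an array, the parent of
 * element i sits at index (i-1)/2, so the loop above asserts the min-heap
 * invariant that no child has an earlier timeout than its parent. */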
+
+static void
+test_heap_randomized(void *ptr)
+{
+ struct min_heap heap;
+ struct event *inserted[1024];
+ struct event *e, *last_e;
+ int i;
+
+ min_heap_ctor_(&heap);
+
+ for (i = 0; i < 1024; ++i) {
+ inserted[i] = malloc(sizeof(struct event));
+ set_random_timeout(inserted[i]);
+ min_heap_push_(&heap, inserted[i]);
+ }
+ check_heap(&heap);
+
+ tt_assert(min_heap_size_(&heap) == 1024);
+
+ for (i = 0; i < 512; ++i) {
+ min_heap_erase_(&heap, inserted[i]);
+ if (0 == (i % 32))
+ check_heap(&heap);
+ }
+ tt_assert(min_heap_size_(&heap) == 512);
+
+ last_e = min_heap_pop_(&heap);
+ while (1) {
+ e = min_heap_pop_(&heap);
+ if (!e)
+ break;
+ tt_want(evutil_timercmp(&last_e->ev_timeout,
+ &e->ev_timeout, <=));
+ }
+ tt_assert(min_heap_size_(&heap) == 0);
+end:
+ for (i = 0; i < 1024; ++i)
+ free(inserted[i]);
+
+ min_heap_dtor_(&heap);
+}
+
+struct testcase_t minheap_testcases[] = {
+ { "randomized", test_heap_randomized, 0, NULL, NULL },
+ END_OF_TESTCASES
+};
diff --git a/libs/libevent/docs/test/regress_rpc.c b/libs/libevent/docs/test/regress_rpc.c
new file mode 100644
index 0000000000..01a058cbb2
--- /dev/null
+++ b/libs/libevent/docs/test/regress_rpc.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* The old tests here need assertions to work. */
+#undef NDEBUG
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event2/buffer.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/http.h"
+#include "event2/http_compat.h"
+#include "event2/http_struct.h"
+#include "event2/rpc.h"
+#include "event2/rpc.h"
+#include "event2/rpc_struct.h"
+#include "event2/tag.h"
+#include "log-internal.h"
+
+#include "regress.gen.h"
+
+#include "regress.h"
+#include "regress_testutils.h"
+
+#ifndef NO_PYTHON_EXISTS
+
+static struct evhttp *
+http_setup(ev_uint16_t *pport)
+{
+ struct evhttp *myhttp;
+ ev_uint16_t port;
+ struct evhttp_bound_socket *sock;
+
+ myhttp = evhttp_new(NULL);
+ if (!myhttp)
+ event_errx(1, "Could not start web server");
+
+ /* Try a few different ports */
+ sock = evhttp_bind_socket_with_handle(myhttp, "127.0.0.1", 0);
+ if (!sock)
+ event_errx(1, "Couldn't open web port");
+
+ port = regress_get_socket_port(evhttp_bound_socket_get_fd(sock));
+
+ *pport = port;
+ return (myhttp);
+}
+
+EVRPC_HEADER(Message, msg, kill)
+EVRPC_HEADER(NeverReply, msg, kill)
+
+EVRPC_GENERATE(Message, msg, kill)
+EVRPC_GENERATE(NeverReply, msg, kill)
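/* Illustrative note: EVRPC_HEADER/EVRPC_GENERATE come from event2/rpc.h and,
 * as used here, appear to declare and instantiate the client/server glue for
 * two RPCs named Message and NeverReply, each taking a struct msg request and
 * filling in a struct kill reply; EVRPC_REGISTER and EVRPC_MAKE_REQUEST below
 * refer to them by those names. */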
+
+static int need_input_hook = 0;
+static int need_output_hook = 0;
+
+static void
+MessageCb(EVRPC_STRUCT(Message)* rpc, void *arg)
+{
+ struct kill* kill_reply = rpc->reply;
+
+ if (need_input_hook) {
+ struct evhttp_request* req = EVRPC_REQUEST_HTTP(rpc);
+ const char *header = evhttp_find_header(
+ req->input_headers, "X-Hook");
+ assert(header);
+ assert(strcmp(header, "input") == 0);
+ }
+
+	/* we just want to fill in some nonsense */
+ EVTAG_ASSIGN(kill_reply, weapon, "dagger");
+ EVTAG_ASSIGN(kill_reply, action, "wave around like an idiot");
+
+ /* no reply to the RPC */
+ EVRPC_REQUEST_DONE(rpc);
+}
+
+static EVRPC_STRUCT(NeverReply) *saved_rpc;
+
+static void
+NeverReplyCb(EVRPC_STRUCT(NeverReply)* rpc, void *arg)
+{
+ test_ok += 1;
+ saved_rpc = rpc;
+}
+
+static void
+rpc_setup(struct evhttp **phttp, ev_uint16_t *pport, struct evrpc_base **pbase)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+
+ http = http_setup(&port);
+ base = evrpc_init(http);
+
+ EVRPC_REGISTER(base, Message, msg, kill, MessageCb, NULL);
+ EVRPC_REGISTER(base, NeverReply, msg, kill, NeverReplyCb, NULL);
+
+ *phttp = http;
+ *pport = port;
+ *pbase = base;
+
+ need_input_hook = 0;
+ need_output_hook = 0;
+}
+
+static void
+rpc_teardown(struct evrpc_base *base)
+{
+ assert(EVRPC_UNREGISTER(base, Message) == 0);
+ assert(EVRPC_UNREGISTER(base, NeverReply) == 0);
+
+ evrpc_free(base);
+}
+
+static void
+rpc_postrequest_failure(struct evhttp_request *req, void *arg)
+{
+ if (req->response_code != HTTP_SERVUNAVAIL) {
+
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+/*
+ * Test a malformed payload submitted as an RPC
+ */
+
+static void
+rpc_basic_test(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ rpc_setup(&http, &port, &base);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request
+	 * to the server using our make-request method.
+	 */
+
+ req = evhttp_request_new(rpc_postrequest_failure, NULL);
+ tt_assert(req);
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+ evbuffer_add_printf(req->output_buffer, "Some Nonsense");
+
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_POST,
+ "/.rpc.Message") == -1) {
+ tt_abort();
+ }
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ evhttp_connection_free(evcon);
+
+ rpc_teardown(base);
+
+ tt_assert(test_ok == 1);
+
+end:
+ evhttp_free(http);
+}
+
+static void
+rpc_postrequest_done(struct evhttp_request *req, void *arg)
+{
+ struct kill* kill_reply = NULL;
+
+ if (req->response_code != HTTP_OK) {
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ kill_reply = kill_new();
+
+ if ((kill_unmarshal(kill_reply, req->input_buffer)) == -1) {
+ fprintf(stderr, "FAILED (unmarshal)\n");
+ exit(1);
+ }
+
+ kill_free(kill_reply);
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+static void
+rpc_basic_message(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct msg *msg;
+
+ rpc_setup(&http, &port, &base);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ tt_assert(evcon);
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request
+	 * to the server using our make-request method.
+	 */
+
+ req = evhttp_request_new(rpc_postrequest_done, NULL);
+ if (req == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+
+ /* set up the basic message */
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+ msg_marshal(req->output_buffer, msg);
+ msg_free(msg);
+
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_POST,
+ "/.rpc.Message") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ evhttp_connection_free(evcon);
+
+ rpc_teardown(base);
+
+end:
+ evhttp_free(http);
+}
+
+static struct evrpc_pool *
+rpc_pool_with_connection(ev_uint16_t port)
+{
+ struct evhttp_connection *evcon;
+ struct evrpc_pool *pool;
+
+ pool = evrpc_pool_new(NULL);
+ assert(pool != NULL);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ assert(evcon != NULL);
+
+ evrpc_pool_add_connection(pool, evcon);
+
+ return (pool);
+}
+
+static void
+GotKillCb(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ char *weapon;
+ char *action;
+
+ if (need_output_hook) {
+ struct evhttp_request *req = status->http_req;
+ const char *header = evhttp_find_header(
+ req->input_headers, "X-Pool-Hook");
+ assert(header);
+ assert(strcmp(header, "ran") == 0);
+ }
+
+ if (status->error != EVRPC_STATUS_ERR_NONE)
+ goto done;
+
+ if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+ fprintf(stderr, "get weapon\n");
+ goto done;
+ }
+ if (EVTAG_GET(kill, action, &action) == -1) {
+ fprintf(stderr, "get action\n");
+ goto done;
+ }
+
+ if (strcmp(weapon, "dagger"))
+ goto done;
+
+ if (strcmp(action, "wave around like an idiot"))
+ goto done;
+
+ test_ok += 1;
+
+done:
+ event_loopexit(NULL);
+}
+
+static void
+GotKillCbTwo(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ char *weapon;
+ char *action;
+
+ if (status->error != EVRPC_STATUS_ERR_NONE)
+ goto done;
+
+ if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+ fprintf(stderr, "get weapon\n");
+ goto done;
+ }
+ if (EVTAG_GET(kill, action, &action) == -1) {
+ fprintf(stderr, "get action\n");
+ goto done;
+ }
+
+ if (strcmp(weapon, "dagger"))
+ goto done;
+
+ if (strcmp(action, "wave around like an idiot"))
+ goto done;
+
+ test_ok += 1;
+
+done:
+ if (test_ok == 2)
+ event_loopexit(NULL);
+}
+
+static int
+rpc_hook_add_header(void *ctx, struct evhttp_request *req,
+ struct evbuffer *evbuf, void *arg)
+{
+ const char *hook_type = arg;
+ if (strcmp("input", hook_type) == 0)
+ evhttp_add_header(req->input_headers, "X-Hook", hook_type);
+ else
+ evhttp_add_header(req->output_headers, "X-Hook", hook_type);
+
+ assert(evrpc_hook_get_connection(ctx) != NULL);
+
+ return (EVRPC_CONTINUE);
+}
+
+static int
+rpc_hook_add_meta(void *ctx, struct evhttp_request *req,
+ struct evbuffer *evbuf, void *arg)
+{
+ evrpc_hook_add_meta(ctx, "meta", "test", 5);
+
+ assert(evrpc_hook_get_connection(ctx) != NULL);
+
+ return (EVRPC_CONTINUE);
+}
+
+static int
+rpc_hook_remove_header(void *ctx, struct evhttp_request *req,
+ struct evbuffer *evbuf, void *arg)
+{
+ const char *header = evhttp_find_header(req->input_headers, "X-Hook");
+ void *data = NULL;
+ size_t data_len = 0;
+
+ assert(header != NULL);
+ assert(strcmp(header, arg) == 0);
+
+ evhttp_remove_header(req->input_headers, "X-Hook");
+ evhttp_add_header(req->input_headers, "X-Pool-Hook", "ran");
+
+ assert(evrpc_hook_find_meta(ctx, "meta", &data, &data_len) == 0);
+ assert(data != NULL);
+ assert(data_len == 5);
+
+ assert(evrpc_hook_get_connection(ctx) != NULL);
+
+ return (EVRPC_CONTINUE);
+}
+
+static void
+rpc_basic_client(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg = NULL;
+ struct kill *kill = NULL;
+
+ rpc_setup(&http, &port, &base);
+
+ need_input_hook = 1;
+ need_output_hook = 1;
+
+ assert(evrpc_add_hook(base, EVRPC_INPUT, rpc_hook_add_header, (void*)"input")
+ != NULL);
+ assert(evrpc_add_hook(base, EVRPC_OUTPUT, rpc_hook_add_header, (void*)"output")
+ != NULL);
+
+ pool = rpc_pool_with_connection(port);
+ tt_assert(pool);
+
+ assert(evrpc_add_hook(pool, EVRPC_OUTPUT, rpc_hook_add_meta, NULL));
+ assert(evrpc_add_hook(pool, EVRPC_INPUT, rpc_hook_remove_header, (void*)"output"));
+
+ /* set up the basic message */
+ msg = msg_new();
+ tt_assert(msg);
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill = kill_new();
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill, GotKillCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ tt_assert(test_ok == 1);
+
+ /* we do it twice to make sure that reuse works correctly */
+ kill_clear(kill);
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill, GotKillCb, NULL);
+
+ event_dispatch();
+
+ tt_assert(test_ok == 2);
+
+	/* we do it a third time to make sure other stuff works, too */
+ kill_clear(kill);
+
+ {
+ struct evrpc_request_wrapper *ctx =
+ EVRPC_MAKE_CTX(Message, msg, kill,
+ pool, msg, kill, GotKillCb, NULL);
+ evrpc_make_request(ctx);
+ }
+
+ event_dispatch();
+
+ rpc_teardown(base);
+
+ tt_assert(test_ok == 3);
+
+end:
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
+
+ need_input_hook = 0;
+ need_output_hook = 0;
+}
+
+/*
+ * We are testing that the second request gets sent over the same
+ * connection after the first RPC completes.
+ */
+static void
+rpc_basic_queued_client(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg=NULL;
+ struct kill *kill_one=NULL, *kill_two=NULL;
+
+ rpc_setup(&http, &port, &base);
+
+ pool = rpc_pool_with_connection(port);
+ tt_assert(pool);
+
+ /* set up the basic message */
+ msg = msg_new();
+ tt_assert(msg);
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill_one = kill_new();
+ kill_two = kill_new();
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill_one, GotKillCbTwo, NULL);
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill_two, GotKillCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ rpc_teardown(base);
+
+ tt_assert(test_ok == 2);
+
+end:
+ if (msg)
+ msg_free(msg);
+ if (kill_one)
+ kill_free(kill_one);
+ if (kill_two)
+ kill_free(kill_two);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+GotErrorCb(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ if (status->error != EVRPC_STATUS_ERR_TIMEOUT)
+ goto done;
+
+	/* the reply should never be complete; this is just a sanity check */
+ if (kill_complete(kill) == 0)
+ goto done;
+
+ test_ok += 1;
+
+done:
+ event_loopexit(NULL);
+}
+
+/* we just pause the RPC and resume it from a deferred timer callback */
+
+struct rpc_hook_ctx_ {
+ void *vbase;
+ void *ctx;
+};
+
+static int hook_pause_cb_called=0;
+
+static void
+rpc_hook_pause_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct rpc_hook_ctx_ *ctx = arg;
+ ++hook_pause_cb_called;
+ evrpc_resume_request(ctx->vbase, ctx->ctx, EVRPC_CONTINUE);
+ free(arg);
+}
+
+static int
+rpc_hook_pause(void *ctx, struct evhttp_request *req, struct evbuffer *evbuf,
+ void *arg)
+{
+ struct rpc_hook_ctx_ *tmp = malloc(sizeof(*tmp));
+ struct timeval tv;
+
+ assert(tmp != NULL);
+ tmp->vbase = arg;
+ tmp->ctx = ctx;
+
+ memset(&tv, 0, sizeof(tv));
+ event_once(-1, EV_TIMEOUT, rpc_hook_pause_cb, tmp, &tv);
+ return EVRPC_PAUSE;
+}
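/* Illustrative note: this is the general shape of an asynchronous RPC hook --
 * return EVRPC_PAUSE, remember the (base-or-pool, ctx) pair, and later hand
 * it back to evrpc_resume_request() with EVRPC_CONTINUE once the deferred
 * work (here, a zero-timeout event) has run. */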
+
+static void
+rpc_basic_client_with_pause(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg = NULL;
+ struct kill *kill= NULL;
+
+ rpc_setup(&http, &port, &base);
+
+ assert(evrpc_add_hook(base, EVRPC_INPUT, rpc_hook_pause, base));
+ assert(evrpc_add_hook(base, EVRPC_OUTPUT, rpc_hook_pause, base));
+
+ pool = rpc_pool_with_connection(port);
+ tt_assert(pool);
+ assert(evrpc_add_hook(pool, EVRPC_INPUT, rpc_hook_pause, pool));
+ assert(evrpc_add_hook(pool, EVRPC_OUTPUT, rpc_hook_pause, pool));
+
+ /* set up the basic message */
+ msg = msg_new();
+ tt_assert(msg);
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill = kill_new();
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill, GotKillCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ tt_int_op(test_ok, ==, 1);
+ tt_int_op(hook_pause_cb_called, ==, 4);
+
+end:
+ if (base)
+ rpc_teardown(base);
+
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+rpc_client_timeout(void)
+{
+ ev_uint16_t port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg = NULL;
+ struct kill *kill = NULL;
+
+ rpc_setup(&http, &port, &base);
+
+ pool = rpc_pool_with_connection(port);
+ tt_assert(pool);
+
+ /* set the timeout to 1 second. */
+ evrpc_pool_set_timeout(pool, 1);
+
+ /* set up the basic message */
+ msg = msg_new();
+ tt_assert(msg);
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill = kill_new();
+
+ EVRPC_MAKE_REQUEST(NeverReply, pool, msg, kill, GotErrorCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ /* free the saved RPC structure up */
+ EVRPC_REQUEST_DONE(saved_rpc);
+
+ rpc_teardown(base);
+
+ tt_assert(test_ok == 2);
+
+end:
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
+}
+
+static void
+rpc_test(void)
+{
+ struct msg *msg = NULL, *msg2 = NULL;
+ struct kill *attack = NULL;
+ struct run *run = NULL;
+ struct evbuffer *tmp = evbuffer_new();
+ struct timeval tv_start, tv_end;
+ ev_uint32_t tag;
+ int i;
+
+ msg = msg_new();
+
+ tt_assert(msg);
+
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "phoenix");
+
+ if (EVTAG_GET(msg, attack, &attack) == -1) {
+ tt_abort_msg("Failed to set kill message.");
+ }
+
+ EVTAG_ASSIGN(attack, weapon, "feather");
+ EVTAG_ASSIGN(attack, action, "tickle");
+ for (i = 0; i < 3; ++i) {
+ if (EVTAG_ARRAY_ADD_VALUE(attack, how_often, i) == NULL) {
+ tt_abort_msg("Failed to add how_often.");
+ }
+ }
+
+ evutil_gettimeofday(&tv_start, NULL);
+ for (i = 0; i < 1000; ++i) {
+ run = EVTAG_ARRAY_ADD(msg, run);
+ if (run == NULL) {
+ tt_abort_msg("Failed to add run message.");
+ }
+ EVTAG_ASSIGN(run, how, "very fast but with some data in it");
+ EVTAG_ASSIGN(run, fixed_bytes,
+ (ev_uint8_t*)"012345678901234567890123");
+
+ if (EVTAG_ARRAY_ADD_VALUE(
+ run, notes, "this is my note") == NULL) {
+ tt_abort_msg("Failed to add note.");
+ }
+ if (EVTAG_ARRAY_ADD_VALUE(run, notes, "pps") == NULL) {
+ tt_abort_msg("Failed to add note");
+ }
+
+ EVTAG_ASSIGN(run, large_number, 0xdead0a0bcafebeefLL);
+ EVTAG_ARRAY_ADD_VALUE(run, other_numbers, 0xdead0a0b);
+ EVTAG_ARRAY_ADD_VALUE(run, other_numbers, 0xbeefcafe);
+ }
+
+ if (msg_complete(msg) == -1)
+ tt_abort_msg("Failed to make complete message.");
+
+ evtag_marshal_msg(tmp, 0xdeaf, msg);
+
+ if (evtag_peek(tmp, &tag) == -1)
+		tt_abort_msg("Failed to peek at tag.");
+
+ if (tag != 0xdeaf)
+ TT_DIE(("Got incorrect tag: %0x.", (unsigned)tag));
+
+ msg2 = msg_new();
+ if (evtag_unmarshal_msg(tmp, 0xdeaf, msg2) == -1)
+ tt_abort_msg("Failed to unmarshal message.");
+
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+ TT_BLATHER(("(%.1f us/add) ",
+ (float)tv_end.tv_sec/(float)i * 1000000.0 +
+ tv_end.tv_usec / (float)i));
+
+ if (!EVTAG_HAS(msg2, from_name) ||
+ !EVTAG_HAS(msg2, to_name) ||
+ !EVTAG_HAS(msg2, attack)) {
+ tt_abort_msg("Missing data structures.");
+ }
+
+ if (EVTAG_GET(msg2, attack, &attack) == -1) {
+ tt_abort_msg("Could not get attack.");
+ }
+
+ if (EVTAG_ARRAY_LEN(msg2, run) != i) {
+ tt_abort_msg("Wrong number of run messages.");
+ }
+
+ /* get the very first run message */
+ if (EVTAG_ARRAY_GET(msg2, run, 0, &run) == -1) {
+ tt_abort_msg("Failed to get run msg.");
+ } else {
+ /* verify the notes */
+ char *note_one, *note_two;
+ ev_uint64_t large_number;
+ ev_uint32_t short_number;
+
+ if (EVTAG_ARRAY_LEN(run, notes) != 2) {
+ tt_abort_msg("Wrong number of note strings.");
+ }
+
+ if (EVTAG_ARRAY_GET(run, notes, 0, &note_one) == -1 ||
+ EVTAG_ARRAY_GET(run, notes, 1, &note_two) == -1) {
+ tt_abort_msg("Could not get note strings.");
+ }
+
+ if (strcmp(note_one, "this is my note") ||
+ strcmp(note_two, "pps")) {
+ tt_abort_msg("Incorrect note strings encoded.");
+ }
+
+ if (EVTAG_GET(run, large_number, &large_number) == -1 ||
+ large_number != 0xdead0a0bcafebeefLL) {
+			tt_abort_msg("Incorrect large_number.");
+ }
+
+ if (EVTAG_ARRAY_LEN(run, other_numbers) != 2) {
+ tt_abort_msg("Wrong number of other_numbers.");
+ }
+
+ if (EVTAG_ARRAY_GET(
+ run, other_numbers, 0, &short_number) == -1) {
+ tt_abort_msg("Could not get short number.");
+ }
+ tt_uint_op(short_number, ==, 0xdead0a0b);
+
+ }
+ tt_int_op(EVTAG_ARRAY_LEN(attack, how_often), ==, 3);
+
+ for (i = 0; i < 3; ++i) {
+ ev_uint32_t res;
+ if (EVTAG_ARRAY_GET(attack, how_often, i, &res) == -1) {
+ TT_DIE(("Cannot get %dth how_often msg.", i));
+ }
+ if ((int)res != i) {
+ TT_DIE(("Wrong message encoded %d != %d", i, res));
+ }
+ }
+
+ test_ok = 1;
+end:
+ if (msg)
+ msg_free(msg);
+ if (msg2)
+ msg_free(msg2);
+ if (tmp)
+ evbuffer_free(tmp);
+}
+
+#define RPC_LEGACY(name) \
+ { #name, run_legacy_test_fn, TT_FORK|TT_NEED_BASE|TT_LEGACY, \
+ &legacy_setup, \
+ rpc_##name }
+#else
+/* NO_PYTHON_EXISTS */
+
+#define RPC_LEGACY(name) \
+ { #name, NULL, TT_SKIP, NULL, NULL }
+
+#endif
+
+struct testcase_t rpc_testcases[] = {
+ RPC_LEGACY(basic_test),
+ RPC_LEGACY(basic_message),
+ RPC_LEGACY(basic_client),
+ RPC_LEGACY(basic_queued_client),
+ RPC_LEGACY(basic_client_with_pause),
+ RPC_LEGACY(client_timeout),
+ RPC_LEGACY(test),
+
+ END_OF_TESTCASES,
+};
diff --git a/libs/libevent/docs/test/regress_ssl.c b/libs/libevent/docs/test/regress_ssl.c
new file mode 100644
index 0000000000..a415952a7d
--- /dev/null
+++ b/libs/libevent/docs/test/regress_ssl.c
@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Get rid of OS X 10.7 and later deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/bufferevent_ssl.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/buffer.h"
+#include "event2/listener.h"
+
+#include "regress.h"
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+#include <openssl/bio.h>
+#include <openssl/err.h>
+#include <openssl/pem.h>
+
+#include <string.h>
+#ifdef _WIN32
+#include <io.h>
+#define read _read
+#define write _write
+#else
+#include <unistd.h>
+#endif
+
+/* A short pre-generated key, to save the cost of doing an RSA key generation
+ * step during the unit tests. It's only 512 bits long, and it is published
+ * in this file, so you would have to be very foolish to consider using it in
+ * your own code. */
+static const char KEY[] =
+ "-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIIBOgIBAAJBAKibTEzXjj+sqpipePX1lEk5BNFuL/dDBbw8QCXgaJWikOiKHeJq\n"
+ "3FQ0OmCnmpkdsPFE4x3ojYmmdgE2i0dJwq0CAwEAAQJAZ08gpUS+qE1IClps/2gG\n"
+ "AAer6Bc31K2AaiIQvCSQcH440cp062QtWMC3V5sEoWmdLsbAHFH26/9ZHn5zAflp\n"
+ "gQIhANWOx/UYeR8HD0WREU5kcuSzgzNLwUErHLzxP7U6aojpAiEAyh2H35CjN/P7\n"
+ "NhcZ4QYw3PeUWpqgJnaE/4i80BSYkSUCIQDLHFhLYLJZ80HwHTADif/ISn9/Ow6b\n"
+ "p6BWh3DbMar/eQIgBPS6azH5vpp983KXkNv9AL4VZi9ac/b+BeINdzC6GP0CIDmB\n"
+ "U6GFEQTZ3IfuiVabG5pummdC4DNbcdI+WKrSFNmQ\n"
+ "-----END RSA PRIVATE KEY-----\n";
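/* Illustrative note: a throwaway key of this shape can be regenerated with
 * `openssl genrsa 512`; 512-bit RSA is used purely to keep the unit tests
 * fast and is far too weak for real use. */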
+
+EVP_PKEY *
+ssl_getkey(void)
+{
+ EVP_PKEY *key;
+ BIO *bio;
+
+ /* new read-only BIO backed by KEY. */
+ bio = BIO_new_mem_buf((char*)KEY, -1);
+ tt_assert(bio);
+
+ key = PEM_read_bio_PrivateKey(bio,NULL,NULL,NULL);
+ BIO_free(bio);
+ tt_assert(key);
+
+ return key;
+end:
+ return NULL;
+}
+
+X509 *
+ssl_getcert(void)
+{
+ /* Dummy code to make a quick-and-dirty valid certificate with
+ OpenSSL. Don't copy this code into your own program! It does a
+ number of things in a stupid and insecure way. */
+ X509 *x509 = NULL;
+ X509_NAME *name = NULL;
+ EVP_PKEY *key = ssl_getkey();
+ int nid;
+ time_t now = time(NULL);
+
+ tt_assert(key);
+
+ x509 = X509_new();
+ tt_assert(x509);
+ tt_assert(0 != X509_set_version(x509, 2));
+ tt_assert(0 != ASN1_INTEGER_set(X509_get_serialNumber(x509),
+ (long)now));
+
+ name = X509_NAME_new();
+ tt_assert(name);
+ nid = OBJ_txt2nid("commonName");
+ tt_assert(NID_undef != nid);
+ tt_assert(0 != X509_NAME_add_entry_by_NID(
+ name, nid, MBSTRING_ASC, (unsigned char*)"example.com",
+ -1, -1, 0));
+
+ X509_set_subject_name(x509, name);
+ X509_set_issuer_name(x509, name);
+
+ X509_time_adj(X509_get_notBefore(x509), 0, &now);
+ now += 3600;
+ X509_time_adj(X509_get_notAfter(x509), 0, &now);
+ X509_set_pubkey(x509, key);
+ tt_assert(0 != X509_sign(x509, key, EVP_sha1()));
+
+ return x509;
+end:
+ X509_free(x509);
+ return NULL;
+}
+
+static int disable_tls_11_and_12 = 0;
+static SSL_CTX *the_ssl_ctx = NULL;
+
+SSL_CTX *
+get_ssl_ctx(void)
+{
+ if (the_ssl_ctx)
+ return the_ssl_ctx;
+ the_ssl_ctx = SSL_CTX_new(SSLv23_method());
+ if (!the_ssl_ctx)
+ return NULL;
+ if (disable_tls_11_and_12) {
+#ifdef SSL_OP_NO_TLSv1_2
+ SSL_CTX_set_options(the_ssl_ctx, SSL_OP_NO_TLSv1_2);
+#endif
+#ifdef SSL_OP_NO_TLSv1_1
+ SSL_CTX_set_options(the_ssl_ctx, SSL_OP_NO_TLSv1_1);
+#endif
+ }
+ return the_ssl_ctx;
+}
+
+void
+init_ssl(void)
+{
+ SSL_library_init();
+ ERR_load_crypto_strings();
+ SSL_load_error_strings();
+ OpenSSL_add_all_algorithms();
+ if (SSLeay() != OPENSSL_VERSION_NUMBER) {
+ TT_DECLARE("WARN", ("Version mismatch for openssl: compiled with %lx but running with %lx", (unsigned long)OPENSSL_VERSION_NUMBER, (unsigned long) SSLeay()));
+ }
+}
+
+/* ====================
+ Here's a simple test: we read a number from the input, increment it, and
+ reply, until we get to 1001.
+*/
+
+static int test_is_done = 0;
+static int n_connected = 0;
+static int got_close = 0;
+static int got_error = 0;
+static int got_timeout = 0;
+static int renegotiate_at = -1;
+static int stop_when_connected = 0;
+static int pending_connect_events = 0;
+static struct event_base *exit_base = NULL;
+
+enum regress_openssl_type
+{
+ REGRESS_OPENSSL_SOCKETPAIR = 1,
+ REGRESS_OPENSSL_FILTER = 2,
+ REGRESS_OPENSSL_RENEGOTIATE = 4,
+ REGRESS_OPENSSL_OPEN = 8,
+ REGRESS_OPENSSL_DIRTY_SHUTDOWN = 16,
+ REGRESS_OPENSSL_FD = 32,
+
+ REGRESS_OPENSSL_CLIENT = 64,
+ REGRESS_OPENSSL_SERVER = 128,
+
+ REGRESS_OPENSSL_FREED = 256,
+ REGRESS_OPENSSL_TIMEOUT = 512,
+ REGRESS_OPENSSL_SLEEP = 1024,
+};
+
+static void
+bufferevent_openssl_check_fd(struct bufferevent *bev, int filter)
+{
+ if (filter) {
+ tt_int_op(bufferevent_getfd(bev), ==, -1);
+ tt_int_op(bufferevent_setfd(bev, -1), ==, -1);
+ } else {
+ tt_int_op(bufferevent_getfd(bev), !=, -1);
+ tt_int_op(bufferevent_setfd(bev, -1), ==, 0);
+ }
+ tt_int_op(bufferevent_getfd(bev), ==, -1);
+
+end:
+ ;
+}
+static void
+bufferevent_openssl_check_freed(struct bufferevent *bev)
+{
+ tt_int_op(event_pending(&bev->ev_read, EVLIST_ALL, NULL), ==, 0);
+ tt_int_op(event_pending(&bev->ev_write, EVLIST_ALL, NULL), ==, 0);
+
+end:
+ ;
+}
+
+static void
+respond_to_number(struct bufferevent *bev, void *ctx)
+{
+ struct evbuffer *b = bufferevent_get_input(bev);
+ char *line;
+ int n;
+
+ enum regress_openssl_type type;
+ type = (enum regress_openssl_type)ctx;
+
+ line = evbuffer_readln(b, NULL, EVBUFFER_EOL_LF);
+ if (! line)
+ return;
+ n = atoi(line);
+ if (n <= 0)
+ TT_FAIL(("Bad number: %s", line));
+ free(line);
+ TT_BLATHER(("The number was %d", n));
+ if (n == 1001) {
+ ++test_is_done;
+ bufferevent_free(bev); /* Should trigger close on other side. */
+ return;
+ }
+ if ((type & REGRESS_OPENSSL_CLIENT) && n == renegotiate_at) {
+ SSL_renegotiate(bufferevent_openssl_get_ssl(bev));
+ }
+ ++n;
+ evbuffer_add_printf(bufferevent_get_output(bev),
+ "%d\n", n);
+ TT_BLATHER(("Done reading; now writing."));
+ bufferevent_enable(bev, EV_WRITE);
+ bufferevent_disable(bev, EV_READ);
+}
+
+static void
+done_writing_cb(struct bufferevent *bev, void *ctx)
+{
+ struct evbuffer *b = bufferevent_get_output(bev);
+ if (evbuffer_get_length(b))
+ return;
+ TT_BLATHER(("Done writing."));
+ bufferevent_disable(bev, EV_WRITE);
+ bufferevent_enable(bev, EV_READ);
+}
+
+static void
+eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ enum regress_openssl_type type;
+ type = (enum regress_openssl_type)ctx;
+
+ TT_BLATHER(("Got event %d", (int)what));
+ if (what & BEV_EVENT_CONNECTED) {
+ SSL *ssl;
+ X509 *peer_cert;
+ ++n_connected;
+ ssl = bufferevent_openssl_get_ssl(bev);
+ tt_assert(ssl);
+ peer_cert = SSL_get_peer_certificate(ssl);
+ if (type & REGRESS_OPENSSL_SERVER) {
+ tt_assert(peer_cert == NULL);
+ } else {
+ tt_assert(peer_cert != NULL);
+ }
+ if (stop_when_connected) {
+ if (--pending_connect_events == 0)
+ event_base_loopexit(exit_base, NULL);
+ }
+ } else if (what & BEV_EVENT_EOF) {
+ TT_BLATHER(("Got a good EOF"));
+ ++got_close;
+ if (type & REGRESS_OPENSSL_FD) {
+ bufferevent_openssl_check_fd(bev, type & REGRESS_OPENSSL_FILTER);
+ }
+ if (type & REGRESS_OPENSSL_FREED) {
+ bufferevent_openssl_check_freed(bev);
+ }
+ bufferevent_free(bev);
+ } else if (what & BEV_EVENT_ERROR) {
+ TT_BLATHER(("Got an error."));
+ ++got_error;
+ if (type & REGRESS_OPENSSL_FD) {
+ bufferevent_openssl_check_fd(bev, type & REGRESS_OPENSSL_FILTER);
+ }
+ if (type & REGRESS_OPENSSL_FREED) {
+ bufferevent_openssl_check_freed(bev);
+ }
+ bufferevent_free(bev);
+ } else if (what & BEV_EVENT_TIMEOUT) {
+ TT_BLATHER(("Got timeout."));
+ ++got_timeout;
+ if (type & REGRESS_OPENSSL_FD) {
+ bufferevent_openssl_check_fd(bev, type & REGRESS_OPENSSL_FILTER);
+ }
+ if (type & REGRESS_OPENSSL_FREED) {
+ bufferevent_openssl_check_freed(bev);
+ }
+ bufferevent_free(bev);
+ }
+end:
+ ;
+}
+
+static void
+open_ssl_bufevs(struct bufferevent **bev1_out, struct bufferevent **bev2_out,
+ struct event_base *base, int is_open, int flags, SSL *ssl1, SSL *ssl2,
+ evutil_socket_t *fd_pair, struct bufferevent **underlying_pair,
+ enum regress_openssl_type type)
+{
+ int state1 = is_open ? BUFFEREVENT_SSL_OPEN :BUFFEREVENT_SSL_CONNECTING;
+ int state2 = is_open ? BUFFEREVENT_SSL_OPEN :BUFFEREVENT_SSL_ACCEPTING;
+ int dirty_shutdown = type & REGRESS_OPENSSL_DIRTY_SHUTDOWN;
+ if (fd_pair) {
+ *bev1_out = bufferevent_openssl_socket_new(
+ base, fd_pair[0], ssl1, state1, flags);
+ *bev2_out = bufferevent_openssl_socket_new(
+ base, fd_pair[1], ssl2, state2, flags);
+ } else {
+ *bev1_out = bufferevent_openssl_filter_new(
+ base, underlying_pair[0], ssl1, state1, flags);
+ *bev2_out = bufferevent_openssl_filter_new(
+ base, underlying_pair[1], ssl2, state2, flags);
+
+ }
+ bufferevent_setcb(*bev1_out, respond_to_number, done_writing_cb,
+ eventcb, (void*)(REGRESS_OPENSSL_CLIENT | (long)type));
+ bufferevent_setcb(*bev2_out, respond_to_number, done_writing_cb,
+ eventcb, (void*)(REGRESS_OPENSSL_SERVER | (long)type));
+
+ bufferevent_openssl_set_allow_dirty_shutdown(*bev1_out, dirty_shutdown);
+ bufferevent_openssl_set_allow_dirty_shutdown(*bev2_out, dirty_shutdown);
+}
+
+static void
+regress_bufferevent_openssl(void *arg)
+{
+ struct basic_test_data *data = arg;
+
+ struct bufferevent *bev1, *bev2;
+ SSL *ssl1, *ssl2;
+ X509 *cert = ssl_getcert();
+ EVP_PKEY *key = ssl_getkey();
+ int flags = BEV_OPT_DEFER_CALLBACKS;
+ struct bufferevent *bev_ll[2] = { NULL, NULL };
+ evutil_socket_t *fd_pair = NULL;
+
+ enum regress_openssl_type type;
+ type = (enum regress_openssl_type)data->setup_data;
+
+ tt_assert(cert);
+ tt_assert(key);
+
+ init_ssl();
+
+ if (type & REGRESS_OPENSSL_RENEGOTIATE) {
+ if (SSLeay() >= 0x10001000 &&
+ SSLeay() < 0x1000104f) {
+			/* 1.0.1 up to 1.0.1c has a bug where TLS 1.1 and 1.2
+			 * connections can't renegotiate; disable them. */
+ disable_tls_11_and_12 = 1;
+ }
+ renegotiate_at = 600;
+ }
+
+ ssl1 = SSL_new(get_ssl_ctx());
+ ssl2 = SSL_new(get_ssl_ctx());
+
+ SSL_use_certificate(ssl2, cert);
+ SSL_use_PrivateKey(ssl2, key);
+
+ if (!(type & REGRESS_OPENSSL_OPEN))
+ flags |= BEV_OPT_CLOSE_ON_FREE;
+
+ if (!(type & REGRESS_OPENSSL_FILTER)) {
+ tt_assert(type & REGRESS_OPENSSL_SOCKETPAIR);
+ fd_pair = data->pair;
+ } else {
+ bev_ll[0] = bufferevent_socket_new(data->base, data->pair[0],
+ BEV_OPT_CLOSE_ON_FREE);
+ bev_ll[1] = bufferevent_socket_new(data->base, data->pair[1],
+ BEV_OPT_CLOSE_ON_FREE);
+ }
+
+ open_ssl_bufevs(&bev1, &bev2, data->base, 0, flags, ssl1, ssl2,
+ fd_pair, bev_ll, type);
+
+ if (!(type & REGRESS_OPENSSL_FILTER)) {
+ tt_int_op(bufferevent_getfd(bev1), ==, data->pair[0]);
+ } else {
+ tt_ptr_op(bufferevent_get_underlying(bev1), ==, bev_ll[0]);
+ }
+
+ if (type & REGRESS_OPENSSL_OPEN) {
+ pending_connect_events = 2;
+ stop_when_connected = 1;
+ exit_base = data->base;
+ event_base_dispatch(data->base);
+		/* Okay, now the handshake is done. Make new
+		 * bufferevents to test opening in BUFFEREVENT_SSL_OPEN */
+ flags |= BEV_OPT_CLOSE_ON_FREE;
+ bufferevent_free(bev1);
+ bufferevent_free(bev2);
+ bev1 = bev2 = NULL;
+ open_ssl_bufevs(&bev1, &bev2, data->base, 1, flags, ssl1, ssl2,
+ fd_pair, bev_ll, type);
+ }
+
+ if (!(type & REGRESS_OPENSSL_TIMEOUT)) {
+ bufferevent_enable(bev1, EV_READ|EV_WRITE);
+ bufferevent_enable(bev2, EV_READ|EV_WRITE);
+
+ evbuffer_add_printf(bufferevent_get_output(bev1), "1\n");
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_is_done == 1);
+ tt_assert(n_connected == 2);
+
+ /* We don't handle shutdown properly yet */
+ if (type & REGRESS_OPENSSL_DIRTY_SHUTDOWN) {
+ tt_int_op(got_close, ==, 1);
+ tt_int_op(got_error, ==, 0);
+ } else {
+ tt_int_op(got_error, ==, 1);
+ }
+ tt_int_op(got_timeout, ==, 0);
+ } else {
+ struct timeval t = { 2, 0 };
+
+ bufferevent_enable(bev1, EV_READ|EV_WRITE);
+ bufferevent_disable(bev2, EV_READ|EV_WRITE);
+
+ bufferevent_set_timeouts(bev1, &t, &t);
+
+ evbuffer_add_printf(bufferevent_get_output(bev1), "1\n");
+
+ event_base_dispatch(data->base);
+
+ tt_assert(test_is_done == 0);
+ tt_assert(n_connected == 0);
+
+ tt_int_op(got_close, ==, 0);
+ tt_int_op(got_error, ==, 0);
+ tt_int_op(got_timeout, ==, 1);
+ }
+end:
+ return;
+}
+
+static void
+acceptcb_deferred(evutil_socket_t fd, short events, void *arg)
+{
+ struct bufferevent *bev = arg;
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+}
+static void
+acceptcb(struct evconnlistener *listener, evutil_socket_t fd,
+ struct sockaddr *addr, int socklen, void *arg)
+{
+ struct basic_test_data *data = arg;
+ struct bufferevent *bev;
+ enum regress_openssl_type type;
+ SSL *ssl = SSL_new(get_ssl_ctx());
+
+ type = (enum regress_openssl_type)data->setup_data;
+
+ SSL_use_certificate(ssl, ssl_getcert());
+ SSL_use_PrivateKey(ssl, ssl_getkey());
+
+ bev = bufferevent_openssl_socket_new(
+ data->base,
+ fd,
+ ssl,
+ BUFFEREVENT_SSL_ACCEPTING,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+
+ bufferevent_setcb(bev, respond_to_number, NULL, eventcb,
+ (void*)(REGRESS_OPENSSL_SERVER));
+
+ if (type & REGRESS_OPENSSL_SLEEP) {
+ struct timeval when = { 1, 0 };
+ event_base_once(data->base, -1, EV_TIMEOUT,
+ acceptcb_deferred, bev, &when);
+ bufferevent_disable(bev, EV_READ|EV_WRITE);
+ } else {
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+ }
+
+ /* Only accept once, then disable ourself. */
+ evconnlistener_disable(listener);
+}
+
+struct rwcount
+{
+ int fd;
+ size_t read;
+ size_t write;
+};
+static int
+bio_rwcount_new(BIO *b)
+{
+ b->init = 0;
+ b->num = -1;
+ b->ptr = NULL;
+ b->flags = 0;
+ return 1;
+}
+static int
+bio_rwcount_free(BIO *b)
+{
+ if (!b)
+ return 0;
+ if (b->shutdown) {
+ b->init = 0;
+ b->flags = 0;
+ b->ptr = NULL;
+ }
+ return 1;
+}
+static int
+bio_rwcount_read(BIO *b, char *out, int outlen)
+{
+ struct rwcount *rw = b->ptr;
+ ev_ssize_t ret = read(rw->fd, out, outlen);
+ ++rw->read;
+ if (ret == -1 && errno == EAGAIN) {
+ BIO_set_retry_read(b);
+ }
+ return ret;
+}
+static int
+bio_rwcount_write(BIO *b, const char *in, int inlen)
+{
+
+ struct rwcount *rw = b->ptr;
+ ev_ssize_t ret = write(rw->fd, in, inlen);
+ ++rw->write;
+ if (ret == -1 && errno == EAGAIN) {
+ BIO_set_retry_write(b);
+ }
+ return ret;
+}
+static long
+bio_rwcount_ctrl(BIO *b, int cmd, long num, void *ptr)
+{
+ long ret = 0;
+ switch (cmd) {
+ case BIO_CTRL_GET_CLOSE:
+ ret = b->shutdown;
+ break;
+ case BIO_CTRL_SET_CLOSE:
+ b->shutdown = (int)num;
+ break;
+ case BIO_CTRL_PENDING:
+ ret = 0;
+ break;
+ case BIO_CTRL_WPENDING:
+ ret = 0;
+ break;
+ case BIO_CTRL_DUP:
+ case BIO_CTRL_FLUSH:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+static int
+bio_rwcount_puts(BIO *b, const char *s)
+{
+ return bio_rwcount_write(b, s, strlen(s));
+}
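+/* Note: the static BIO_METHOD below, like the direct accesses to b->init,
+ * b->ptr and b->shutdown above, assumes the pre-1.1.0 OpenSSL layout in
+ * which BIO and BIO_METHOD are not opaque. */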
+#define BIO_TYPE_LIBEVENT_RWCOUNT 0xff1
+static BIO_METHOD methods_rwcount = {
+ BIO_TYPE_LIBEVENT_RWCOUNT, "rwcount",
+ bio_rwcount_write,
+ bio_rwcount_read,
+ bio_rwcount_puts,
+ NULL /* bio_rwcount_gets */,
+ bio_rwcount_ctrl,
+ bio_rwcount_new,
+ bio_rwcount_free,
+ NULL /* callback_ctrl */,
+};
+static BIO_METHOD *
+BIO_s_rwcount(void)
+{
+ return &methods_rwcount;
+}
+static BIO *
+BIO_new_rwcount(int close_flag)
+{
+ BIO *result;
+ if (!(result = BIO_new(BIO_s_rwcount())))
+ return NULL;
+ result->init = 1;
+ result->ptr = NULL;
+ result->shutdown = !!close_flag;
+ return result;
+}
+
+static void
+regress_bufferevent_openssl_connect(void *arg)
+{
+ struct basic_test_data *data = arg;
+
+ struct event_base *base = data->base;
+
+ struct evconnlistener *listener;
+ struct bufferevent *bev;
+ struct sockaddr_in sin;
+ struct sockaddr_storage ss;
+ ev_socklen_t slen;
+ SSL *ssl;
+ BIO *bio;
+ struct rwcount rw = { -1, 0, 0 };
+ enum regress_openssl_type type;
+
+ type = (enum regress_openssl_type)data->setup_data;
+
+ init_ssl();
+
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(0x7f000001);
+
+ memset(&ss, 0, sizeof(ss));
+ slen = sizeof(ss);
+
+ listener = evconnlistener_new_bind(base, acceptcb, data,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE,
+ -1, (struct sockaddr *)&sin, sizeof(sin));
+
+ tt_assert(listener);
+ tt_assert(evconnlistener_get_fd(listener) >= 0);
+
+ ssl = SSL_new(get_ssl_ctx());
+ tt_assert(ssl);
+
+ bev = bufferevent_openssl_socket_new(
+ data->base, -1, ssl,
+ BUFFEREVENT_SSL_CONNECTING,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_DEFER_CALLBACKS);
+ tt_assert(bev);
+
+ bufferevent_setcb(bev, respond_to_number, NULL, eventcb,
+ (void*)(REGRESS_OPENSSL_CLIENT));
+
+ tt_assert(getsockname(evconnlistener_get_fd(listener),
+ (struct sockaddr*)&ss, &slen) == 0);
+ tt_assert(slen == sizeof(struct sockaddr_in));
+ tt_int_op(((struct sockaddr*)&ss)->sa_family, ==, AF_INET);
+
+ tt_assert(0 ==
+ bufferevent_socket_connect(bev, (struct sockaddr*)&ss, slen));
+	/* Installing our counting bio is only possible once we have an fd;
+	 * otherwise be_openssl can and will overwrite the bio first. */
+ if (type & REGRESS_OPENSSL_SLEEP) {
+ rw.fd = bufferevent_getfd(bev);
+ bio = BIO_new_rwcount(0);
+ tt_assert(bio);
+ bio->ptr = &rw;
+ SSL_set_bio(ssl, bio, bio);
+ }
+ evbuffer_add_printf(bufferevent_get_output(bev), "1\n");
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+
+ event_base_dispatch(base);
+
+ tt_int_op(rw.read, <=, 100);
+ tt_int_op(rw.write, <=, 100);
+end:
+ ;
+}
+
+struct testcase_t ssl_testcases[] = {
+#define T(a) ((void *)(a))
+ { "bufferevent_socketpair", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup, T(REGRESS_OPENSSL_SOCKETPAIR) },
+ { "bufferevent_filter", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup, T(REGRESS_OPENSSL_FILTER) },
+ { "bufferevent_renegotiate_socketpair", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_RENEGOTIATE) },
+ { "bufferevent_renegotiate_filter", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_RENEGOTIATE) },
+ { "bufferevent_socketpair_startopen", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_OPEN) },
+ { "bufferevent_filter_startopen", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_OPEN) },
+
+ { "bufferevent_socketpair_dirty_shutdown", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+ { "bufferevent_filter_dirty_shutdown", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+ { "bufferevent_renegotiate_socketpair_dirty_shutdown",
+ regress_bufferevent_openssl,
+ TT_ISOLATED,
+ &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_RENEGOTIATE | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+ { "bufferevent_renegotiate_filter_dirty_shutdown",
+ regress_bufferevent_openssl,
+ TT_ISOLATED,
+ &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_RENEGOTIATE | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+ { "bufferevent_socketpair_startopen_dirty_shutdown",
+ regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_OPEN | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+ { "bufferevent_filter_startopen_dirty_shutdown",
+ regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_OPEN | REGRESS_OPENSSL_DIRTY_SHUTDOWN) },
+
+ { "bufferevent_socketpair_fd", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_FD) },
+ { "bufferevent_socketpair_freed", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_FREED) },
+ { "bufferevent_socketpair_freed_fd", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_FREED | REGRESS_OPENSSL_FD) },
+ { "bufferevent_filter_freed_fd", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_FILTER | REGRESS_OPENSSL_FREED | REGRESS_OPENSSL_FD) },
+
+ { "bufferevent_socketpair_timeout", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_TIMEOUT) },
+ { "bufferevent_socketpair_timeout_freed_fd", regress_bufferevent_openssl,
+ TT_ISOLATED, &basic_setup,
+ T(REGRESS_OPENSSL_SOCKETPAIR | REGRESS_OPENSSL_TIMEOUT | REGRESS_OPENSSL_FREED | REGRESS_OPENSSL_FD) },
+
+ { "bufferevent_connect", regress_bufferevent_openssl_connect,
+ TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
+ { "bufferevent_connect_sleep", regress_bufferevent_openssl_connect,
+ TT_FORK|TT_NEED_BASE, &basic_setup, T(REGRESS_OPENSSL_SLEEP) },
+
+#undef T
+
+ END_OF_TESTCASES,
+};
diff --git a/libs/libevent/docs/test/regress_testutils.c b/libs/libevent/docs/test/regress_testutils.c
new file mode 100644
index 0000000000..7554a5413f
--- /dev/null
+++ b/libs/libevent/docs/test/regress_testutils.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <ws2tcpip.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/dns.h"
+#include "event2/dns_struct.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "event2/bufferevent.h"
+#include "log-internal.h"
+#include "regress.h"
+#include "regress_testutils.h"
+
+/* globals */
+static struct evdns_server_port *dns_port;
+evutil_socket_t dns_sock = -1;
+
+/* Helper: return the port that a socket is bound on, in host order. */
+int
+regress_get_socket_port(evutil_socket_t fd)
+{
+ struct sockaddr_storage ss;
+ ev_socklen_t socklen = sizeof(ss);
+ if (getsockname(fd, (struct sockaddr*)&ss, &socklen) != 0)
+ return -1;
+ if (ss.ss_family == AF_INET)
+ return ntohs( ((struct sockaddr_in*)&ss)->sin_port);
+ else if (ss.ss_family == AF_INET6)
+ return ntohs( ((struct sockaddr_in6*)&ss)->sin6_port);
+ else
+ return -1;
+}
+
+struct evdns_server_port *
+regress_get_dnsserver(struct event_base *base,
+ ev_uint16_t *portnum,
+ evutil_socket_t *psock,
+ evdns_request_callback_fn_type cb,
+ void *arg)
+{
+ struct evdns_server_port *port = NULL;
+ evutil_socket_t sock;
+ struct sockaddr_in my_addr;
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ tt_abort_perror("socket");
+ }
+
+ evutil_make_socket_nonblocking(sock);
+
+ memset(&my_addr, 0, sizeof(my_addr));
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_port = htons(*portnum);
+ my_addr.sin_addr.s_addr = htonl(0x7f000001UL);
+ if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr)) < 0) {
+ evutil_closesocket(sock);
+ tt_abort_perror("bind");
+ }
+ port = evdns_add_server_port_with_base(base, sock, 0, cb, arg);
+ if (!*portnum)
+ *portnum = regress_get_socket_port(sock);
+ if (psock)
+ *psock = sock;
+
+ return port;
+end:
+ return NULL;
+}
+
+void
+regress_clean_dnsserver(void)
+{
+ if (dns_port)
+ evdns_close_server_port(dns_port);
+ if (dns_sock >= 0)
+ evutil_closesocket(dns_sock);
+}
+
+static void strtolower(char *s)
+{
+ while (*s) {
+ *s = EVUTIL_TOLOWER_(*s);
+ ++s;
+ }
+}
+void
+regress_dns_server_cb(struct evdns_server_request *req, void *data)
+{
+ struct regress_dns_server_table *tab = data;
+ char *question;
+
+ if (req->nquestions != 1)
+ TT_DIE(("Only handling one question at a time; got %d",
+ req->nquestions));
+
+ question = req->questions[0]->name;
+
+ while (tab->q && evutil_ascii_strcasecmp(question, tab->q) &&
+ strcmp("*", tab->q))
+ ++tab;
+ if (tab->q == NULL)
+ TT_DIE(("Unexpected question: '%s'", question));
+
+ ++tab->seen;
+
+ if (tab->lower)
+ strtolower(question);
+
+ if (!strcmp(tab->anstype, "err")) {
+ int err = atoi(tab->ans);
+ tt_assert(! evdns_server_request_respond(req, err));
+ return;
+ } else if (!strcmp(tab->anstype, "errsoa")) {
+ int err = atoi(tab->ans);
+ char soa_record[] =
+ "\x04" "dns1" "\x05" "icann" "\x03" "org" "\0"
+ "\x0a" "hostmaster" "\x05" "icann" "\x03" "org" "\0"
+ "\x77\xde\x5e\xba" /* serial */
+ "\x00\x00\x1c\x20" /* refreshtime = 2h */
+ "\x00\x00\x0e\x10" /* retry = 1h */
+ "\x00\x12\x75\x00" /* expiration = 14d */
+ "\x00\x00\x0e\x10" /* min.ttl = 1h */
+ ;
+ evdns_server_request_add_reply(
+ req, EVDNS_AUTHORITY_SECTION,
+ "example.com", EVDNS_TYPE_SOA, EVDNS_CLASS_INET,
+ 42, sizeof(soa_record) - 1, 0, soa_record);
+ tt_assert(! evdns_server_request_respond(req, err));
+ return;
+ } else if (!strcmp(tab->anstype, "A")) {
+ struct in_addr in;
+ if (!evutil_inet_pton(AF_INET, tab->ans, &in)) {
+ TT_DIE(("Bad A value %s in table", tab->ans));
+ }
+ evdns_server_request_add_a_reply(req, question, 1, &in.s_addr,
+ 100);
+ } else if (!strcmp(tab->anstype, "AAAA")) {
+ struct in6_addr in6;
+ if (!evutil_inet_pton(AF_INET6, tab->ans, &in6)) {
+ TT_DIE(("Bad AAAA value %s in table", tab->ans));
+ }
+ evdns_server_request_add_aaaa_reply(req,
+ question, 1, &in6.s6_addr, 100);
+ } else {
+ TT_DIE(("Weird table entry with type '%s'", tab->anstype));
+ }
+	tt_assert(! evdns_server_request_respond(req, 0));
+ return;
+end:
+ tt_want(! evdns_server_request_drop(req));
+}
+
+int
+regress_dnsserver(struct event_base *base, ev_uint16_t *port,
+ struct regress_dns_server_table *search_table)
+{
+ dns_port = regress_get_dnsserver(base, port, &dns_sock,
+ regress_dns_server_cb, search_table);
+ return dns_port != NULL;
+}
+
+int
+regress_get_listener_addr(struct evconnlistener *lev,
+ struct sockaddr *sa, ev_socklen_t *socklen)
+{
+ evutil_socket_t s = evconnlistener_get_fd(lev);
+ if (s <= 0)
+ return -1;
+ return getsockname(s, sa, socklen);
+}
diff --git a/libs/libevent/docs/test/regress_testutils.h b/libs/libevent/docs/test/regress_testutils.h
new file mode 100644
index 0000000000..040516a585
--- /dev/null
+++ b/libs/libevent/docs/test/regress_testutils.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef REGRESS_TESTUTILS_H_INCLUDED_
+#define REGRESS_TESTUTILS_H_INCLUDED_
+
+#include "event2/dns.h"
+
+struct regress_dns_server_table {
+ const char *q;
+ const char *anstype;
+ const char *ans;
+ int seen;
+ int lower;
+};
+
+struct evdns_server_port *
+regress_get_dnsserver(struct event_base *base,
+ ev_uint16_t *portnum,
+ evutil_socket_t *psock,
+ evdns_request_callback_fn_type cb,
+ void *arg);
+
+/* Helper: return the port that a socket is bound on, in host order. */
+int regress_get_socket_port(evutil_socket_t fd);
+
+/* used to look up pre-canned responses in a search table */
+void regress_dns_server_cb(
+ struct evdns_server_request *req, void *data);
+
+/* globally allocates a dns server that serves from a search table */
+int regress_dnsserver(struct event_base *base, ev_uint16_t *port,
+	struct regress_dns_server_table *search_table);
+
+/* clean up the global dns server resources */
+void regress_clean_dnsserver(void);
+
+struct evconnlistener;
+struct sockaddr;
+int regress_get_listener_addr(struct evconnlistener *lev,
+ struct sockaddr *sa, ev_socklen_t *socklen);
+
+#endif /* REGRESS_TESTUTILS_H_INCLUDED_ */
+
diff --git a/libs/libevent/docs/test/regress_thread.c b/libs/libevent/docs/test/regress_thread.c
new file mode 100644
index 0000000000..9ff6a8fa88
--- /dev/null
+++ b/libs/libevent/docs/test/regress_thread.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+
+/* The old tests here need assertions to work. */
+#undef NDEBUG
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifdef EVENT__HAVE_PTHREADS
+#include <pthread.h>
+#elif defined(_WIN32)
+#include <process.h>
+#endif
+#include <assert.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <time.h>
+
+#include "sys/queue.h"
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/thread.h"
+#include "event2/util.h"
+#include "evthread-internal.h"
+#include "event-internal.h"
+#include "defer-internal.h"
+#include "regress.h"
+#include "tinytest_macros.h"
+#include "time-internal.h"
+#include "regress_thread.h"
+
+struct cond_wait {
+ void *lock;
+ void *cond;
+};
+
+static void
+wake_all_timeout(evutil_socket_t fd, short what, void *arg)
+{
+ struct cond_wait *cw = arg;
+ EVLOCK_LOCK(cw->lock, 0);
+ EVTHREAD_COND_BROADCAST(cw->cond);
+ EVLOCK_UNLOCK(cw->lock, 0);
+
+}
+
+static void
+wake_one_timeout(evutil_socket_t fd, short what, void *arg)
+{
+ struct cond_wait *cw = arg;
+ EVLOCK_LOCK(cw->lock, 0);
+ EVTHREAD_COND_SIGNAL(cw->cond);
+ EVLOCK_UNLOCK(cw->lock, 0);
+}
+
+#define NUM_THREADS 100
+#define NUM_ITERATIONS 100
+void *count_lock;
+static int count;
+
+static THREAD_FN
+basic_thread(void *arg)
+{
+ struct cond_wait cw;
+ struct event_base *base = arg;
+ struct event ev;
+ int i = 0;
+
+ EVTHREAD_ALLOC_LOCK(cw.lock, 0);
+ EVTHREAD_ALLOC_COND(cw.cond);
+ assert(cw.lock);
+ assert(cw.cond);
+
+ evtimer_assign(&ev, base, wake_all_timeout, &cw);
+ for (i = 0; i < NUM_ITERATIONS; i++) {
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = 0;
+ tv.tv_usec = 3000;
+
+ EVLOCK_LOCK(cw.lock, 0);
+		/* We need to make sure that the event does not fire before
+		 * we get to wait on the condition variable. */
+ assert(evtimer_add(&ev, &tv) == 0);
+
+ assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
+ EVLOCK_UNLOCK(cw.lock, 0);
+
+ EVLOCK_LOCK(count_lock, 0);
+ ++count;
+ EVLOCK_UNLOCK(count_lock, 0);
+ }
+
+ /* exit the loop only if all threads fired all timeouts */
+ EVLOCK_LOCK(count_lock, 0);
+ if (count >= NUM_THREADS * NUM_ITERATIONS)
+ event_base_loopexit(base, NULL);
+ EVLOCK_UNLOCK(count_lock, 0);
+
+ EVTHREAD_FREE_LOCK(cw.lock, 0);
+ EVTHREAD_FREE_COND(cw.cond);
+
+ THREAD_RETURN();
+}
+
+static int notification_fd_used = 0;
+#ifndef _WIN32
+static int got_sigchld = 0;
+static void
+sigchld_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct timeval tv;
+ struct event_base *base = arg;
+
+ got_sigchld++;
+ tv.tv_usec = 100000;
+ tv.tv_sec = 0;
+ event_base_loopexit(base, &tv);
+}
+
+
+static void
+notify_fd_cb(evutil_socket_t fd, short event, void *arg)
+{
+ ++notification_fd_used;
+}
+#endif
+
+static void
+thread_basic(void *arg)
+{
+ THREAD_T threads[NUM_THREADS];
+ struct event ev;
+ struct timeval tv;
+ int i;
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+
+ struct event *notification_event = NULL;
+ struct event *sigchld_event = NULL;
+
+ EVTHREAD_ALLOC_LOCK(count_lock, 0);
+ tt_assert(count_lock);
+
+ tt_assert(base);
+ if (evthread_make_base_notifiable(base)<0) {
+ tt_abort_msg("Couldn't make base notifiable!");
+ }
+
+#ifndef _WIN32
+ if (data->setup_data && !strcmp(data->setup_data, "forking")) {
+ pid_t pid;
+ int status;
+ sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
+ /* This piggybacks on the th_notify_fd weirdly, and looks
+ * inside libevent internals. Not a good idea in non-testing
+ * code! */
+ notification_event = event_new(base,
+ base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
+ NULL);
+ event_add(sigchld_event, NULL);
+ event_add(notification_event, NULL);
+
+ if ((pid = fork()) == 0) {
+ event_del(notification_event);
+ if (event_reinit(base) < 0) {
+ TT_FAIL(("reinit"));
+ exit(1);
+ }
+ event_assign(notification_event, base,
+ base->th_notify_fd[0], EV_READ|EV_PERSIST,
+ notify_fd_cb, NULL);
+ event_add(notification_event, NULL);
+ goto child;
+ }
+
+ event_base_dispatch(base);
+
+ if (waitpid(pid, &status, 0) == -1)
+ tt_abort_perror("waitpid");
+ TT_BLATHER(("Waitpid okay\n"));
+
+ tt_assert(got_sigchld);
+ tt_int_op(notification_fd_used, ==, 0);
+
+ goto end;
+ }
+
+child:
+#endif
+ for (i = 0; i < NUM_THREADS; ++i)
+ THREAD_START(threads[i], basic_thread, base);
+
+ evtimer_assign(&ev, base, NULL, NULL);
+ evutil_timerclear(&tv);
+ tv.tv_sec = 1000;
+ event_add(&ev, &tv);
+
+ event_base_dispatch(base);
+
+ for (i = 0; i < NUM_THREADS; ++i)
+ THREAD_JOIN(threads[i]);
+
+ event_del(&ev);
+
+ tt_int_op(count, ==, NUM_THREADS * NUM_ITERATIONS);
+
+ EVTHREAD_FREE_LOCK(count_lock, 0);
+
+	TT_BLATHER(("notifications==%d", notification_fd_used));
+
+end:
+
+ if (notification_event)
+ event_free(notification_event);
+ if (sigchld_event)
+ event_free(sigchld_event);
+}
+
+#undef NUM_THREADS
+#define NUM_THREADS 10
+
+struct alerted_record {
+ struct cond_wait *cond;
+ struct timeval delay;
+ struct timeval alerted_at;
+ int timed_out;
+};
+
+static THREAD_FN
+wait_for_condition(void *arg)
+{
+ struct alerted_record *rec = arg;
+ int r;
+
+ EVLOCK_LOCK(rec->cond->lock, 0);
+ if (rec->delay.tv_sec || rec->delay.tv_usec) {
+ r = EVTHREAD_COND_WAIT_TIMED(rec->cond->cond, rec->cond->lock,
+ &rec->delay);
+ } else {
+ r = EVTHREAD_COND_WAIT(rec->cond->cond, rec->cond->lock);
+ }
+ EVLOCK_UNLOCK(rec->cond->lock, 0);
+
+ evutil_gettimeofday(&rec->alerted_at, NULL);
+ if (r == 1)
+ rec->timed_out = 1;
+
+ THREAD_RETURN();
+}
+
+static void
+thread_conditions_simple(void *arg)
+{
+ struct timeval tv_signal, tv_timeout, tv_broadcast;
+ struct alerted_record alerted[NUM_THREADS];
+ THREAD_T threads[NUM_THREADS];
+ struct cond_wait cond;
+ int i;
+ struct timeval launched_at;
+ struct event wake_one;
+ struct event wake_all;
+ struct basic_test_data *data = arg;
+ struct event_base *base = data->base;
+ int n_timed_out=0, n_signal=0, n_broadcast=0;
+
+ tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
+ tv_signal.tv_usec = 30*1000;
+ tv_timeout.tv_usec = 150*1000;
+ tv_broadcast.tv_usec = 500*1000;
+
+ EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVTHREAD_ALLOC_COND(cond.cond);
+ tt_assert(cond.lock);
+ tt_assert(cond.cond);
+ for (i = 0; i < NUM_THREADS; ++i) {
+ memset(&alerted[i], 0, sizeof(struct alerted_record));
+ alerted[i].cond = &cond;
+ }
+
+ /* Threads 5 and 6 will be allowed to time out */
+ memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
+ memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));
+
+ evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
+ evtimer_assign(&wake_all, base, wake_all_timeout, &cond);
+
+ evutil_gettimeofday(&launched_at, NULL);
+
+ /* Launch the threads... */
+ for (i = 0; i < NUM_THREADS; ++i) {
+ THREAD_START(threads[i], wait_for_condition, &alerted[i]);
+ }
+
+ /* Start the timers... */
+ tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
+ tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);
+
+ /* And run for a bit... */
+ event_base_dispatch(base);
+
+ /* And wait till the threads are done. */
+ for (i = 0; i < NUM_THREADS; ++i)
+ THREAD_JOIN(threads[i]);
+
+ /* Now, let's see what happened. At least one of 5 or 6 should
+ * have timed out. */
+ n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
+ tt_int_op(n_timed_out, >=, 1);
+ tt_int_op(n_timed_out, <=, 2);
+
+ for (i = 0; i < NUM_THREADS; ++i) {
+ const struct timeval *target_delay;
+ struct timeval target_time, actual_delay;
+ if (alerted[i].timed_out) {
+ TT_BLATHER(("%d looks like a timeout\n", i));
+ target_delay = &tv_timeout;
+ tt_assert(i == 5 || i == 6);
+ } else if (evutil_timerisset(&alerted[i].alerted_at)) {
+ long diff1,diff2;
+ evutil_timersub(&alerted[i].alerted_at,
+ &launched_at, &actual_delay);
+ diff1 = timeval_msec_diff(&actual_delay,
+ &tv_signal);
+ diff2 = timeval_msec_diff(&actual_delay,
+ &tv_broadcast);
+ if (labs(diff1) < labs(diff2)) {
+ TT_BLATHER(("%d looks like a signal\n", i));
+ target_delay = &tv_signal;
+ ++n_signal;
+ } else {
+ TT_BLATHER(("%d looks like a broadcast\n", i));
+ target_delay = &tv_broadcast;
+ ++n_broadcast;
+ }
+ } else {
+ TT_FAIL(("Thread %d never got woken", i));
+ continue;
+ }
+ evutil_timeradd(target_delay, &launched_at, &target_time);
+ test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
+ 0, 50);
+ }
+ tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
+ tt_int_op(n_signal, ==, 1);
+
+end:
+ EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVTHREAD_FREE_COND(cond.cond);
+}
+
+#define CB_COUNT 128
+#define QUEUE_THREAD_COUNT 8
+
+static void
+SLEEP_MS(int ms)
+{
+ struct timeval tv;
+ tv.tv_sec = ms/1000;
+ tv.tv_usec = (ms%1000)*1000;
+ evutil_usleep_(&tv);
+}
+
+struct deferred_test_data {
+ struct event_callback cbs[CB_COUNT];
+ struct event_base *queue;
+};
+
+static struct timeval timer_start = {0,0};
+static struct timeval timer_end = {0,0};
+static unsigned callback_count = 0;
+static THREAD_T load_threads[QUEUE_THREAD_COUNT];
+static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];
+
+static void
+deferred_callback(struct event_callback *cb, void *arg)
+{
+ SLEEP_MS(1);
+ callback_count += 1;
+}
+
+static THREAD_FN
+load_deferred_queue(void *arg)
+{
+ struct deferred_test_data *data = arg;
+ size_t i;
+
+ for (i = 0; i < CB_COUNT; ++i) {
+ event_deferred_cb_init_(&data->cbs[i], 0, deferred_callback,
+ NULL);
+ event_deferred_cb_schedule_(data->queue, &data->cbs[i]);
+ SLEEP_MS(1);
+ }
+
+ THREAD_RETURN();
+}
+
+static void
+timer_callback(evutil_socket_t fd, short what, void *arg)
+{
+ evutil_gettimeofday(&timer_end, NULL);
+}
+
+static void
+start_threads_callback(evutil_socket_t fd, short what, void *arg)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_THREAD_COUNT; ++i) {
+ THREAD_START(load_threads[i], load_deferred_queue,
+ &deferred_data[i]);
+ }
+}
+
+static void
+thread_deferred_cb_skew(void *arg)
+{
+ struct timeval tv_timer = {1, 0};
+ struct event_base *base = NULL;
+ struct event_config *cfg = NULL;
+ struct timeval elapsed;
+ int elapsed_usec;
+ int i;
+
+ cfg = event_config_new();
+ tt_assert(cfg);
+ event_config_set_max_dispatch_interval(cfg, NULL, 16, 0);
+
+ base = event_base_new_with_config(cfg);
+ tt_assert(base);
+
+ for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
+ deferred_data[i].queue = base;
+
+ evutil_gettimeofday(&timer_start, NULL);
+ event_base_once(base, -1, EV_TIMEOUT, timer_callback, NULL,
+ &tv_timer);
+ event_base_once(base, -1, EV_TIMEOUT, start_threads_callback,
+ NULL, NULL);
+ event_base_dispatch(base);
+
+ evutil_timersub(&timer_end, &timer_start, &elapsed);
+ TT_BLATHER(("callback count, %u", callback_count));
+ elapsed_usec =
+ (unsigned)(elapsed.tv_sec*1000000 + elapsed.tv_usec);
+ TT_BLATHER(("elapsed time, %u usec", elapsed_usec));
+
+ /* XXX be more intelligent here. just make sure skew is
+ * within .4 seconds for now. */
+ tt_assert(elapsed_usec >= 600000 && elapsed_usec <= 1400000);
+
+end:
+ for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
+ THREAD_JOIN(load_threads[i]);
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
+}
+
+static struct event time_events[5];
+static struct timeval times[5];
+static struct event_base *exit_base = NULL;
+static void
+note_time_cb(evutil_socket_t fd, short what, void *arg)
+{
+ evutil_gettimeofday(arg, NULL);
+ if (arg == &times[4]) {
+ event_base_loopbreak(exit_base);
+ }
+}
+static THREAD_FN
+register_events_subthread(void *arg)
+{
+ struct timeval tv = {0,0};
+ SLEEP_MS(100);
+ event_active(&time_events[0], EV_TIMEOUT, 1);
+ SLEEP_MS(100);
+ event_active(&time_events[1], EV_TIMEOUT, 1);
+ SLEEP_MS(100);
+ tv.tv_usec = 100*1000;
+ event_add(&time_events[2], &tv);
+ tv.tv_usec = 150*1000;
+ event_add(&time_events[3], &tv);
+ SLEEP_MS(200);
+ event_active(&time_events[4], EV_TIMEOUT, 1);
+
+ THREAD_RETURN();
+}
+
+static void
+thread_no_events(void *arg)
+{
+ THREAD_T thread;
+ struct basic_test_data *data = arg;
+ struct timeval starttime, endtime;
+ int i;
+ exit_base = data->base;
+
+ memset(times,0,sizeof(times));
+ for (i=0;i<5;++i) {
+ event_assign(&time_events[i], data->base,
+ -1, 0, note_time_cb, &times[i]);
+ }
+
+ evutil_gettimeofday(&starttime, NULL);
+ THREAD_START(thread, register_events_subthread, data->base);
+ event_base_loop(data->base, EVLOOP_NO_EXIT_ON_EMPTY);
+ evutil_gettimeofday(&endtime, NULL);
+ tt_assert(event_base_got_break(data->base));
+ THREAD_JOIN(thread);
+ for (i=0; i<5; ++i) {
+ struct timeval diff;
+ double sec;
+ evutil_timersub(&times[i], &starttime, &diff);
+ sec = diff.tv_sec + diff.tv_usec/1.0e6;
+ TT_BLATHER(("event %d at %.4f seconds", i, sec));
+ }
+ test_timeval_diff_eq(&starttime, &times[0], 100);
+ test_timeval_diff_eq(&starttime, &times[1], 200);
+ test_timeval_diff_eq(&starttime, &times[2], 400);
+ test_timeval_diff_eq(&starttime, &times[3], 450);
+ test_timeval_diff_eq(&starttime, &times[4], 500);
+ test_timeval_diff_eq(&starttime, &endtime, 500);
+
+end:
+ ;
+}
+
+#define TEST(name) \
+ { #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE, \
+ &basic_setup, NULL }
+
+struct testcase_t thread_testcases[] = {
+ { "basic", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
+ &basic_setup, NULL },
+#ifndef _WIN32
+ { "forking", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
+ &basic_setup, (char*)"forking" },
+#endif
+ TEST(conditions_simple),
+ { "deferred_cb_skew", thread_deferred_cb_skew,
+ TT_FORK|TT_NEED_THREADS|TT_OFF_BY_DEFAULT,
+ &basic_setup, NULL },
+#ifndef _WIN32
+ /****** XXX TODO FIXME windows seems to be having some timing trouble,
+ * looking into it now. / ellzey
+ ******/
+ TEST(no_events),
+#endif
+ END_OF_TESTCASES
+};
+
diff --git a/libs/libevent/docs/test/regress_thread.h b/libs/libevent/docs/test/regress_thread.h
new file mode 100644
index 0000000000..831b51e507
--- /dev/null
+++ b/libs/libevent/docs/test/regress_thread.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef REGRESS_THREAD_H_INCLUDED_
+#define REGRESS_THREAD_H_INCLUDED_
+
+#ifdef EVENT__HAVE_PTHREADS
+#define THREAD_T pthread_t
+#define THREAD_FN void *
+#define THREAD_RETURN() return (NULL)
+#define THREAD_START(threadvar, fn, arg) \
+ pthread_create(&(threadvar), NULL, fn, arg)
+#define THREAD_JOIN(th) pthread_join(th, NULL)
+#else
+#define THREAD_T HANDLE
+#define THREAD_FN unsigned __stdcall
+#define THREAD_RETURN() return (0)
+#define THREAD_START(threadvar, fn, arg) do { \
+ uintptr_t threadhandle = _beginthreadex(NULL,0,fn,(arg),0,NULL); \
+ (threadvar) = (HANDLE) threadhandle; \
+ } while (0)
+#define THREAD_JOIN(th) WaitForSingleObject(th, INFINITE)
+#endif
+
+#endif
diff --git a/libs/libevent/docs/test/regress_util.c b/libs/libevent/docs/test/regress_util.c
new file mode 100644
index 0000000000..60f085bf1d
--- /dev/null
+++ b/libs/libevent/docs/test/regress_util.c
@@ -0,0 +1,1413 @@
+/*
+ * Copyright (c) 2009-2012 Nick Mathewson and Niels Provos
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../util-internal.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <ws2tcpip.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef EVENT__HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event2/event.h"
+#include "event2/util.h"
+#include "../ipv6-internal.h"
+#include "../log-internal.h"
+#include "../strlcpy-internal.h"
+#include "../mm-internal.h"
+#include "../time-internal.h"
+
+#include "regress.h"
+
+enum entry_status { NORMAL, CANONICAL, BAD };
+
+/* This is a big table of results we expect from generating and parsing */
+static struct ipv4_entry {
+ const char *addr;
+ ev_uint32_t res;
+ enum entry_status status;
+} ipv4_entries[] = {
+ { "1.2.3.4", 0x01020304u, CANONICAL },
+ { "255.255.255.255", 0xffffffffu, CANONICAL },
+ { "256.0.0.0", 0, BAD },
+ { "ABC", 0, BAD },
+ { "1.2.3.4.5", 0, BAD },
+ { "176.192.208.244", 0xb0c0d0f4, CANONICAL },
+ { NULL, 0, BAD },
+};
+
+static struct ipv6_entry {
+ const char *addr;
+ ev_uint32_t res[4];
+ enum entry_status status;
+} ipv6_entries[] = {
+ { "::", { 0, 0, 0, 0, }, CANONICAL },
+ { "0:0:0:0:0:0:0:0", { 0, 0, 0, 0, }, NORMAL },
+ { "::1", { 0, 0, 0, 1, }, CANONICAL },
+ { "::1.2.3.4", { 0, 0, 0, 0x01020304, }, CANONICAL },
+ { "ffff:1::", { 0xffff0001u, 0, 0, 0, }, CANONICAL },
+ { "ffff:0000::", { 0xffff0000u, 0, 0, 0, }, NORMAL },
+ { "ffff::1234", { 0xffff0000u, 0, 0, 0x1234, }, CANONICAL },
+ { "0102::1.2.3.4", {0x01020000u, 0, 0, 0x01020304u }, NORMAL },
+ { "::9:c0a8:1:1", { 0, 0, 0x0009c0a8u, 0x00010001u }, CANONICAL },
+ { "::ffff:1.2.3.4", { 0, 0, 0x000ffffu, 0x01020304u }, CANONICAL },
+ { "FFFF::", { 0xffff0000u, 0, 0, 0 }, NORMAL },
+ { "foobar.", { 0, 0, 0, 0 }, BAD },
+ { "foobar", { 0, 0, 0, 0 }, BAD },
+ { "fo:obar", { 0, 0, 0, 0 }, BAD },
+ { "ffff", { 0, 0, 0, 0 }, BAD },
+ { "fffff::", { 0, 0, 0, 0 }, BAD },
+ { "fffff::", { 0, 0, 0, 0 }, BAD },
+ { "::1.0.1.1000", { 0, 0, 0, 0 }, BAD },
+ { "1:2:33333:4::", { 0, 0, 0, 0 }, BAD },
+ { "1:2:3:4:5:6:7:8:9", { 0, 0, 0, 0 }, BAD },
+ { "1::2::3", { 0, 0, 0, 0 }, BAD },
+ { ":::1", { 0, 0, 0, 0 }, BAD },
+ { NULL, { 0, 0, 0, 0, }, BAD },
+};
+
+static void
+regress_ipv4_parse(void *ptr)
+{
+ int i;
+ for (i = 0; ipv4_entries[i].addr; ++i) {
+ char written[128];
+ struct ipv4_entry *ent = &ipv4_entries[i];
+ struct in_addr in;
+ int r;
+ r = evutil_inet_pton(AF_INET, ent->addr, &in);
+ if (r == 0) {
+ if (ent->status != BAD) {
+ TT_FAIL(("%s did not parse, but it's a good address!",
+ ent->addr));
+ }
+ continue;
+ }
+ if (ent->status == BAD) {
+ TT_FAIL(("%s parsed, but we expected an error", ent->addr));
+ continue;
+ }
+ if (ntohl(in.s_addr) != ent->res) {
+ TT_FAIL(("%s parsed to %lx, but we expected %lx", ent->addr,
+ (unsigned long)ntohl(in.s_addr),
+ (unsigned long)ent->res));
+ continue;
+ }
+ if (ent->status == CANONICAL) {
+ const char *w = evutil_inet_ntop(AF_INET, &in, written,
+ sizeof(written));
+ if (!w) {
+ TT_FAIL(("Tried to write out %s; got NULL.", ent->addr));
+ continue;
+ }
+ if (strcmp(written, ent->addr)) {
+ TT_FAIL(("Tried to write out %s; got %s",
+ ent->addr, written));
+ continue;
+ }
+ }
+
+ }
+
+}
+
+static void
+regress_ipv6_parse(void *ptr)
+{
+#ifdef AF_INET6
+ int i, j;
+
+ for (i = 0; ipv6_entries[i].addr; ++i) {
+ char written[128];
+ struct ipv6_entry *ent = &ipv6_entries[i];
+ struct in6_addr in6;
+ int r;
+ r = evutil_inet_pton(AF_INET6, ent->addr, &in6);
+ if (r == 0) {
+ if (ent->status != BAD)
+ TT_FAIL(("%s did not parse, but it's a good address!",
+ ent->addr));
+ continue;
+ }
+ if (ent->status == BAD) {
+ TT_FAIL(("%s parsed, but we expected an error", ent->addr));
+ continue;
+ }
+ for (j = 0; j < 4; ++j) {
+ /* Can't use s6_addr32 here; some don't have it. */
+ ev_uint32_t u =
+ ((ev_uint32_t)in6.s6_addr[j*4 ] << 24) |
+ ((ev_uint32_t)in6.s6_addr[j*4+1] << 16) |
+ ((ev_uint32_t)in6.s6_addr[j*4+2] << 8) |
+ ((ev_uint32_t)in6.s6_addr[j*4+3]);
+ if (u != ent->res[j]) {
+ TT_FAIL(("%s did not parse as expected.", ent->addr));
+ continue;
+ }
+ }
+ if (ent->status == CANONICAL) {
+ const char *w = evutil_inet_ntop(AF_INET6, &in6, written,
+ sizeof(written));
+ if (!w) {
+ TT_FAIL(("Tried to write out %s; got NULL.", ent->addr));
+ continue;
+ }
+ if (strcmp(written, ent->addr)) {
+ TT_FAIL(("Tried to write out %s; got %s", ent->addr, written));
+ continue;
+ }
+ }
+
+ }
+#else
+ TT_BLATHER(("Skipping IPv6 address parsing."));
+#endif
+}
+
+static struct sa_port_ent {
+ const char *parse;
+ int safamily;
+ const char *addr;
+ int port;
+} sa_port_ents[] = {
+ { "[ffff::1]:1000", AF_INET6, "ffff::1", 1000 },
+ { "[ffff::1]", AF_INET6, "ffff::1", 0 },
+ { "[ffff::1", 0, NULL, 0 },
+ { "[ffff::1]:65599", 0, NULL, 0 },
+ { "[ffff::1]:0", 0, NULL, 0 },
+ { "[ffff::1]:-1", 0, NULL, 0 },
+ { "::1", AF_INET6, "::1", 0 },
+ { "1:2::1", AF_INET6, "1:2::1", 0 },
+ { "192.168.0.1:50", AF_INET, "192.168.0.1", 50 },
+ { "1.2.3.4", AF_INET, "1.2.3.4", 0 },
+ { NULL, 0, NULL, 0 },
+};
+
+static void
+regress_sockaddr_port_parse(void *ptr)
+{
+ struct sockaddr_storage ss;
+ int i, r;
+
+ for (i = 0; sa_port_ents[i].parse; ++i) {
+ struct sa_port_ent *ent = &sa_port_ents[i];
+ int len = sizeof(ss);
+ memset(&ss, 0, sizeof(ss));
+ r = evutil_parse_sockaddr_port(ent->parse, (struct sockaddr*)&ss, &len);
+ if (r < 0) {
+ if (ent->safamily)
+ TT_FAIL(("Couldn't parse %s!", ent->parse));
+ continue;
+ } else if (! ent->safamily) {
+ TT_FAIL(("Shouldn't have been able to parse %s!", ent->parse));
+ continue;
+ }
+ if (ent->safamily == AF_INET) {
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+ sin.sin_len = sizeof(sin);
+#endif
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(ent->port);
+ r = evutil_inet_pton(AF_INET, ent->addr, &sin.sin_addr);
+ if (1 != r) {
+ TT_FAIL(("Couldn't parse ipv4 target %s.", ent->addr));
+ } else if (memcmp(&sin, &ss, sizeof(sin))) {
+ TT_FAIL(("Parse for %s was not as expected.", ent->parse));
+ } else if (len != sizeof(sin)) {
+ TT_FAIL(("Length for %s not as expected.",ent->parse));
+ }
+ } else {
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(ent->port);
+ r = evutil_inet_pton(AF_INET6, ent->addr, &sin6.sin6_addr);
+ if (1 != r) {
+ TT_FAIL(("Couldn't parse ipv6 target %s.", ent->addr));
+ } else if (memcmp(&sin6, &ss, sizeof(sin6))) {
+ TT_FAIL(("Parse for %s was not as expected.", ent->parse));
+ } else if (len != sizeof(sin6)) {
+ TT_FAIL(("Length for %s not as expected.",ent->parse));
+ }
+ }
+ }
+}
+
+
+static void
+regress_sockaddr_port_format(void *ptr)
+{
+ struct sockaddr_storage ss;
+ int len;
+ const char *cp;
+ char cbuf[128];
+ int r;
+
+ len = sizeof(ss);
+ r = evutil_parse_sockaddr_port("192.168.1.1:80",
+ (struct sockaddr*)&ss, &len);
+ tt_int_op(r,==,0);
+ cp = evutil_format_sockaddr_port_(
+ (struct sockaddr*)&ss, cbuf, sizeof(cbuf));
+ tt_ptr_op(cp,==,cbuf);
+ tt_str_op(cp,==,"192.168.1.1:80");
+
+ len = sizeof(ss);
+ r = evutil_parse_sockaddr_port("[ff00::8010]:999",
+ (struct sockaddr*)&ss, &len);
+ tt_int_op(r,==,0);
+ cp = evutil_format_sockaddr_port_(
+ (struct sockaddr*)&ss, cbuf, sizeof(cbuf));
+ tt_ptr_op(cp,==,cbuf);
+ tt_str_op(cp,==,"[ff00::8010]:999");
+
+ ss.ss_family=99;
+ cp = evutil_format_sockaddr_port_(
+ (struct sockaddr*)&ss, cbuf, sizeof(cbuf));
+ tt_ptr_op(cp,==,cbuf);
+ tt_str_op(cp,==,"<addr with socktype 99>");
+end:
+ ;
+}
+
+static struct sa_pred_ent {
+ const char *parse;
+
+ int is_loopback;
+} sa_pred_entries[] = {
+ { "127.0.0.1", 1 },
+ { "127.0.3.2", 1 },
+ { "128.1.2.3", 0 },
+ { "18.0.0.1", 0 },
+ { "129.168.1.1", 0 },
+
+ { "::1", 1 },
+ { "::0", 0 },
+ { "f::1", 0 },
+ { "::501", 0 },
+ { NULL, 0 },
+
+};
+
+static void
+test_evutil_sockaddr_predicates(void *ptr)
+{
+ struct sockaddr_storage ss;
+ int r, i;
+
+ for (i=0; sa_pred_entries[i].parse; ++i) {
+ struct sa_pred_ent *ent = &sa_pred_entries[i];
+ int len = sizeof(ss);
+
+ r = evutil_parse_sockaddr_port(ent->parse, (struct sockaddr*)&ss, &len);
+
+ if (r<0) {
+ TT_FAIL(("Couldn't parse %s!", ent->parse));
+ continue;
+ }
+
+ /* sockaddr_is_loopback */
+ if (ent->is_loopback != evutil_sockaddr_is_loopback_((struct sockaddr*)&ss)) {
+ TT_FAIL(("evutil_sockaddr_loopback(%s) not as expected",
+ ent->parse));
+ }
+ }
+}
+
+static void
+test_evutil_strtoll(void *ptr)
+{
+ const char *s;
+ char *endptr;
+
+ tt_want(evutil_strtoll("5000000000", NULL, 10) ==
+ ((ev_int64_t)5000000)*1000);
+ tt_want(evutil_strtoll("-5000000000", NULL, 10) ==
+ ((ev_int64_t)5000000)*-1000);
+ s = " 99999stuff";
+ tt_want(evutil_strtoll(s, &endptr, 10) == (ev_int64_t)99999);
+ tt_want(endptr == s+6);
+ tt_want(evutil_strtoll("foo", NULL, 10) == 0);
+ }
+
+static void
+test_evutil_snprintf(void *ptr)
+{
+ char buf[16];
+ int r;
+ ev_uint64_t u64 = ((ev_uint64_t)1000000000)*200;
+ ev_int64_t i64 = -1 * (ev_int64_t) u64;
+ size_t size = 8000;
+ ev_ssize_t ssize = -9000;
+
+ r = evutil_snprintf(buf, sizeof(buf), "%d %d", 50, 100);
+ tt_str_op(buf, ==, "50 100");
+ tt_int_op(r, ==, 6);
+
+ r = evutil_snprintf(buf, sizeof(buf), "longish %d", 1234567890);
+ tt_str_op(buf, ==, "longish 1234567");
+ tt_int_op(r, ==, 18);
+
+ r = evutil_snprintf(buf, sizeof(buf), EV_U64_FMT, EV_U64_ARG(u64));
+ tt_str_op(buf, ==, "200000000000");
+ tt_int_op(r, ==, 12);
+
+ r = evutil_snprintf(buf, sizeof(buf), EV_I64_FMT, EV_I64_ARG(i64));
+ tt_str_op(buf, ==, "-200000000000");
+ tt_int_op(r, ==, 13);
+
+ r = evutil_snprintf(buf, sizeof(buf), EV_SIZE_FMT" "EV_SSIZE_FMT,
+ EV_SIZE_ARG(size), EV_SSIZE_ARG(ssize));
+ tt_str_op(buf, ==, "8000 -9000");
+ tt_int_op(r, ==, 10);
+
+ end:
+ ;
+}
+
+static void
+test_evutil_casecmp(void *ptr)
+{
+ tt_int_op(evutil_ascii_strcasecmp("ABC", "ABC"), ==, 0);
+ tt_int_op(evutil_ascii_strcasecmp("ABC", "abc"), ==, 0);
+ tt_int_op(evutil_ascii_strcasecmp("ABC", "abcd"), <, 0);
+ tt_int_op(evutil_ascii_strcasecmp("ABC", "abb"), >, 0);
+ tt_int_op(evutil_ascii_strcasecmp("ABCd", "abc"), >, 0);
+
+ tt_int_op(evutil_ascii_strncasecmp("Libevent", "LibEvEnT", 100), ==, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Libevent", "LibEvEnT", 4), ==, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Libevent", "LibEXXXX", 4), ==, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Libevent", "LibE", 4), ==, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Libe", "LibEvEnT", 4), ==, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Lib", "LibEvEnT", 4), <, 0);
+ tt_int_op(evutil_ascii_strncasecmp("abc", "def", 99), <, 0);
+ tt_int_op(evutil_ascii_strncasecmp("Z", "qrst", 1), >, 0);
+end:
+ ;
+}
+
+static void
+test_evutil_rtrim(void *ptr)
+{
+#define TEST_TRIM(s, result) \
+ do { \
+ if (cp) mm_free(cp); \
+ cp = mm_strdup(s); \
+ tt_assert(cp); \
+ evutil_rtrim_lws_(cp); \
+ tt_str_op(cp, ==, result); \
+ } while(0)
+
+ char *cp = NULL;
+ (void) ptr;
+
+ TEST_TRIM("", "");
+ TEST_TRIM("a", "a");
+ TEST_TRIM("abcdef ghi", "abcdef ghi");
+
+ TEST_TRIM(" ", "");
+ TEST_TRIM(" ", "");
+ TEST_TRIM("a ", "a");
+ TEST_TRIM("abcdef gH ", "abcdef gH");
+
+ TEST_TRIM("\t\t", "");
+ TEST_TRIM(" \t", "");
+ TEST_TRIM("\t", "");
+ TEST_TRIM("a \t", "a");
+ TEST_TRIM("a\t ", "a");
+ TEST_TRIM("a\t", "a");
+ TEST_TRIM("abcdef gH \t ", "abcdef gH");
+
+end:
+ if (cp)
+ mm_free(cp);
+}
+
+static int logsev = 0;
+static char *logmsg = NULL;
+
+static void
+logfn(int severity, const char *msg)
+{
+ logsev = severity;
+ tt_want(msg);
+ if (msg) {
+ if (logmsg)
+ free(logmsg);
+ logmsg = strdup(msg);
+ }
+}
+
+static int fatal_want_severity = 0;
+static const char *fatal_want_message = NULL;
+static void
+fatalfn(int exitcode)
+{
+ if (logsev != fatal_want_severity ||
+ !logmsg ||
+ strcmp(logmsg, fatal_want_message))
+ exit(0);
+ else
+ exit(exitcode);
+}
+
+#ifndef _WIN32
+#define CAN_CHECK_ERR
+static void
+check_error_logging(void (*fn)(void), int wantexitcode,
+ int wantseverity, const char *wantmsg)
+{
+ pid_t pid;
+ int status = 0, exitcode;
+ fatal_want_severity = wantseverity;
+ fatal_want_message = wantmsg;
+ if ((pid = regress_fork()) == 0) {
+ /* child process */
+ fn();
+ exit(0); /* should be unreachable. */
+ } else {
+ wait(&status);
+ exitcode = WEXITSTATUS(status);
+ tt_int_op(wantexitcode, ==, exitcode);
+ }
+end:
+ ;
+}
+
+static void
+errx_fn(void)
+{
+ event_errx(2, "Fatal error; too many kumquats (%d)", 5);
+}
+
+static void
+err_fn(void)
+{
+ errno = ENOENT;
+ event_err(5,"Couldn't open %s", "/very/bad/file");
+}
+
+static void
+sock_err_fn(void)
+{
+ evutil_socket_t fd = socket(AF_INET, SOCK_STREAM, 0);
+#ifdef _WIN32
+ EVUTIL_SET_SOCKET_ERROR(WSAEWOULDBLOCK);
+#else
+ errno = EAGAIN;
+#endif
+ event_sock_err(20, fd, "Unhappy socket");
+}
+#endif
+
+static void
+test_evutil_log(void *ptr)
+{
+ evutil_socket_t fd = -1;
+ char buf[128];
+
+ event_set_log_callback(logfn);
+ event_set_fatal_callback(fatalfn);
+#define RESET() do { \
+ logsev = 0; \
+ if (logmsg) free(logmsg); \
+ logmsg = NULL; \
+ } while (0)
+#define LOGEQ(sev,msg) do { \
+ tt_int_op(logsev,==,sev); \
+ tt_assert(logmsg != NULL); \
+ tt_str_op(logmsg,==,msg); \
+ } while (0)
+
+#ifdef CAN_CHECK_ERR
+ /* We need to disable these tests for now. Previously, the logging
+ * module didn't enforce the requirement that a fatal callback
+	 * actually exit. Now, it exits no matter what, so if we want to
+ * reinstate these tests, we'll need to fork for each one. */
+ check_error_logging(errx_fn, 2, EVENT_LOG_ERR,
+ "Fatal error; too many kumquats (5)");
+ RESET();
+#endif
+
+ event_warnx("Far too many %s (%d)", "wombats", 99);
+ LOGEQ(EVENT_LOG_WARN, "Far too many wombats (99)");
+ RESET();
+
+ event_msgx("Connecting lime to coconut");
+ LOGEQ(EVENT_LOG_MSG, "Connecting lime to coconut");
+ RESET();
+
+ event_debug(("A millisecond passed! We should log that!"));
+#ifdef USE_DEBUG
+ LOGEQ(EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
+#else
+ tt_int_op(logsev,==,0);
+ tt_ptr_op(logmsg,==,NULL);
+#endif
+ RESET();
+
+ /* Try with an errno. */
+ errno = ENOENT;
+ event_warn("Couldn't open %s", "/bad/file");
+ evutil_snprintf(buf, sizeof(buf),
+ "Couldn't open /bad/file: %s",strerror(ENOENT));
+ LOGEQ(EVENT_LOG_WARN,buf);
+ RESET();
+
+#ifdef CAN_CHECK_ERR
+ evutil_snprintf(buf, sizeof(buf),
+ "Couldn't open /very/bad/file: %s",strerror(ENOENT));
+ check_error_logging(err_fn, 5, EVENT_LOG_ERR, buf);
+ RESET();
+#endif
+
+ /* Try with a socket errno. */
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+#ifdef _WIN32
+ evutil_snprintf(buf, sizeof(buf),
+ "Unhappy socket: %s",
+ evutil_socket_error_to_string(WSAEWOULDBLOCK));
+ EVUTIL_SET_SOCKET_ERROR(WSAEWOULDBLOCK);
+#else
+ evutil_snprintf(buf, sizeof(buf),
+ "Unhappy socket: %s", strerror(EAGAIN));
+ errno = EAGAIN;
+#endif
+ event_sock_warn(fd, "Unhappy socket");
+ LOGEQ(EVENT_LOG_WARN, buf);
+ RESET();
+
+#ifdef CAN_CHECK_ERR
+ check_error_logging(sock_err_fn, 20, EVENT_LOG_ERR, buf);
+ RESET();
+#endif
+
+#undef RESET
+#undef LOGEQ
+end:
+ if (logmsg)
+ free(logmsg);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+}
+
+static void
+test_evutil_strlcpy(void *arg)
+{
+ char buf[8];
+
+ /* Successful case. */
+ tt_int_op(5, ==, strlcpy(buf, "Hello", sizeof(buf)));
+ tt_str_op(buf, ==, "Hello");
+
+ /* Overflow by a lot. */
+ tt_int_op(13, ==, strlcpy(buf, "pentasyllabic", sizeof(buf)));
+ tt_str_op(buf, ==, "pentasy");
+
+ /* Overflow by exactly one. */
+ tt_int_op(8, ==, strlcpy(buf, "overlong", sizeof(buf)));
+ tt_str_op(buf, ==, "overlon");
+end:
+ ;
+}
+
+struct example_struct {
+ const char *a;
+ const char *b;
+ long c;
+};
+
+static void
+test_evutil_upcast(void *arg)
+{
+ struct example_struct es1;
+ const char **cp;
+ es1.a = "World";
+ es1.b = "Hello";
+ es1.c = -99;
+
+ tt_int_op(evutil_offsetof(struct example_struct, b), ==, sizeof(char*));
+
+ cp = &es1.b;
+ tt_ptr_op(EVUTIL_UPCAST(cp, struct example_struct, b), ==, &es1);
+
+end:
+ ;
+}
+
+static void
+test_evutil_integers(void *arg)
+{
+ ev_int64_t i64;
+ ev_uint64_t u64;
+ ev_int32_t i32;
+ ev_uint32_t u32;
+ ev_int16_t i16;
+ ev_uint16_t u16;
+ ev_int8_t i8;
+ ev_uint8_t u8;
+
+ void *ptr;
+ ev_intptr_t iptr;
+ ev_uintptr_t uptr;
+
+ ev_ssize_t ssize;
+
+ tt_int_op(sizeof(u64), ==, 8);
+ tt_int_op(sizeof(i64), ==, 8);
+ tt_int_op(sizeof(u32), ==, 4);
+ tt_int_op(sizeof(i32), ==, 4);
+ tt_int_op(sizeof(u16), ==, 2);
+ tt_int_op(sizeof(i16), ==, 2);
+ tt_int_op(sizeof(u8), ==, 1);
+ tt_int_op(sizeof(i8), ==, 1);
+
+ tt_int_op(sizeof(ev_ssize_t), ==, sizeof(size_t));
+ tt_int_op(sizeof(ev_intptr_t), >=, sizeof(void *));
+	tt_int_op(sizeof(ev_uintptr_t), ==, sizeof(ev_intptr_t));
+
+ u64 = 1000000000;
+ u64 *= 1000000000;
+ tt_assert(u64 / 1000000000 == 1000000000);
+ i64 = -1000000000;
+ i64 *= 1000000000;
+ tt_assert(i64 / 1000000000 == -1000000000);
+
+ u64 = EV_UINT64_MAX;
+ i64 = EV_INT64_MAX;
+ tt_assert(u64 > 0);
+ tt_assert(i64 > 0);
+ u64++;
+/* i64++; */
+ tt_assert(u64 == 0);
+/* tt_assert(i64 == EV_INT64_MIN); */
+/* tt_assert(i64 < 0); */
+
+ u32 = EV_UINT32_MAX;
+ i32 = EV_INT32_MAX;
+ tt_assert(u32 > 0);
+ tt_assert(i32 > 0);
+ u32++;
+/* i32++; */
+ tt_assert(u32 == 0);
+/* tt_assert(i32 == EV_INT32_MIN); */
+/* tt_assert(i32 < 0); */
+
+ u16 = EV_UINT16_MAX;
+ i16 = EV_INT16_MAX;
+ tt_assert(u16 > 0);
+ tt_assert(i16 > 0);
+ u16++;
+/* i16++; */
+ tt_assert(u16 == 0);
+/* tt_assert(i16 == EV_INT16_MIN); */
+/* tt_assert(i16 < 0); */
+
+ u8 = EV_UINT8_MAX;
+ i8 = EV_INT8_MAX;
+ tt_assert(u8 > 0);
+ tt_assert(i8 > 0);
+ u8++;
+/* i8++;*/
+ tt_assert(u8 == 0);
+/* tt_assert(i8 == EV_INT8_MIN); */
+/* tt_assert(i8 < 0); */
+
+/*
+ ssize = EV_SSIZE_MAX;
+ tt_assert(ssize > 0);
+ ssize++;
+ tt_assert(ssize < 0);
+ tt_assert(ssize == EV_SSIZE_MIN);
+*/
+
+ ptr = &ssize;
+ iptr = (ev_intptr_t)ptr;
+ uptr = (ev_uintptr_t)ptr;
+ ptr = (void *)iptr;
+ tt_assert(ptr == &ssize);
+ ptr = (void *)uptr;
+ tt_assert(ptr == &ssize);
+
+ iptr = -1;
+ tt_assert(iptr < 0);
+end:
+ ;
+}
+
+struct evutil_addrinfo *
+ai_find_by_family(struct evutil_addrinfo *ai, int family)
+{
+ while (ai) {
+ if (ai->ai_family == family)
+ return ai;
+ ai = ai->ai_next;
+ }
+ return NULL;
+}
+
+struct evutil_addrinfo *
+ai_find_by_protocol(struct evutil_addrinfo *ai, int protocol)
+{
+ while (ai) {
+ if (ai->ai_protocol == protocol)
+ return ai;
+ ai = ai->ai_next;
+ }
+ return NULL;
+}
+
+
+int
+test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
+ int socktype, int protocol, int line)
+{
+ struct sockaddr_storage ss;
+ int slen = sizeof(ss);
+ int gotport;
+ char buf[128];
+ memset(&ss, 0, sizeof(ss));
+ if (socktype > 0)
+ tt_int_op(ai->ai_socktype, ==, socktype);
+ if (protocol > 0)
+ tt_int_op(ai->ai_protocol, ==, protocol);
+
+ if (evutil_parse_sockaddr_port(
+ sockaddr_port, (struct sockaddr*)&ss, &slen)<0) {
+ TT_FAIL(("Couldn't parse expected address %s on line %d",
+ sockaddr_port, line));
+ return -1;
+ }
+ if (ai->ai_family != ss.ss_family) {
+ TT_FAIL(("Address family %d did not match %d on line %d",
+ ai->ai_family, ss.ss_family, line));
+ return -1;
+ }
+ if (ai->ai_addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in*)ai->ai_addr;
+ evutil_inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
+ gotport = ntohs(sin->sin_port);
+ if (ai->ai_addrlen != sizeof(struct sockaddr_in)) {
+ TT_FAIL(("Addr size mismatch on line %d", line));
+ return -1;
+ }
+ } else {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)ai->ai_addr;
+ evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
+ gotport = ntohs(sin6->sin6_port);
+ if (ai->ai_addrlen != sizeof(struct sockaddr_in6)) {
+ TT_FAIL(("Addr size mismatch on line %d", line));
+ return -1;
+ }
+ }
+ if (evutil_sockaddr_cmp(ai->ai_addr, (struct sockaddr*)&ss, 1)) {
+ TT_FAIL(("Wanted %s, got %s:%d on line %d", sockaddr_port,
+ buf, gotport, line));
+ return -1;
+ } else {
+ TT_BLATHER(("Wanted %s, got %s:%d on line %d", sockaddr_port,
+ buf, gotport, line));
+ }
+ return 0;
+end:
+ TT_FAIL(("Test failed on line %d", line));
+ return -1;
+}
+
+static void
+test_evutil_rand(void *arg)
+{
+ char buf1[32];
+ char buf2[32];
+ int counts[256];
+ int i, j, k, n=0;
+ struct evutil_weakrand_state seed = { 12346789U };
+
+ memset(buf2, 0, sizeof(buf2));
+ memset(counts, 0, sizeof(counts));
+
+ for (k=0;k<32;++k) {
+ /* Try a few different start and end points; try to catch
+ * the various misaligned cases of arc4random_buf */
+ int startpoint = evutil_weakrand_(&seed) % 4;
+ int endpoint = 32 - (evutil_weakrand_(&seed) % 4);
+
+ memset(buf2, 0, sizeof(buf2));
+
+		/* Do 8 runs over buf1, or-ing the result into buf2 each
+ * time, to make sure we're setting each byte that we mean
+ * to set. */
+ for (i=0;i<8;++i) {
+ memset(buf1, 0, sizeof(buf1));
+ evutil_secure_rng_get_bytes(buf1 + startpoint,
+ endpoint-startpoint);
+ n += endpoint - startpoint;
+ for (j=0; j<32; ++j) {
+ if (j >= startpoint && j < endpoint) {
+ buf2[j] |= buf1[j];
+ ++counts[(unsigned char)buf1[j]];
+ } else {
+ tt_assert(buf1[j] == 0);
+ tt_int_op(buf1[j], ==, 0);
+
+ }
+ }
+ }
+
+		/* This will give a false positive with probability
+		 * 1/(256**8) == 2**-64 for each character. */
+ for (j=startpoint;j<endpoint;++j) {
+ tt_int_op(buf2[j], !=, 0);
+ }
+ }
+
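+ /* Check that evutil_weakrand_range_() stays within [0, top). */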
+ evutil_weakrand_seed_(&seed, 0);
+ for (i = 0; i < 10000; ++i) {
+ ev_int32_t r = evutil_weakrand_range_(&seed, 9999);
+ tt_int_op(0, <=, r);
+ tt_int_op(r, <, 9999);
+ }
+
+ /* for (i=0;i<256;++i) { printf("%3d %2d\n", i, counts[i]); } */
+end:
+ ;
+}
+
+static void
+test_evutil_getaddrinfo(void *arg)
+{
+ struct evutil_addrinfo *ai = NULL, *a;
+ struct evutil_addrinfo hints;
+ int r;
+
+ /* Try using it as a pton. */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evutil_getaddrinfo("1.2.3.4", "8080", &hints, &ai);
+ tt_int_op(r, ==, 0);
+ tt_assert(ai);
+ tt_ptr_op(ai->ai_next, ==, NULL); /* no ambiguity */
+ test_ai_eq(ai, "1.2.3.4:8080", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_protocol = IPPROTO_UDP;
+ r = evutil_getaddrinfo("1001:b0b::f00f", "4321", &hints, &ai);
+ tt_int_op(r, ==, 0);
+ tt_assert(ai);
+ tt_ptr_op(ai->ai_next, ==, NULL); /* no ambiguity */
+ test_ai_eq(ai, "[1001:b0b::f00f]:4321", SOCK_DGRAM, IPPROTO_UDP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ /* Try out the behavior of nodename=NULL */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_INET;
+ hints.ai_protocol = IPPROTO_TCP;
+ hints.ai_flags = EVUTIL_AI_PASSIVE; /* as if for bind */
+ r = evutil_getaddrinfo(NULL, "9999", &hints, &ai);
+ tt_int_op(r,==,0);
+ tt_assert(ai);
+ tt_ptr_op(ai->ai_next, ==, NULL);
+ test_ai_eq(ai, "0.0.0.0:9999", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+ hints.ai_flags = 0; /* as if for connect */
+ r = evutil_getaddrinfo(NULL, "9998", &hints, &ai);
+ tt_assert(ai);
+ tt_int_op(r,==,0);
+ test_ai_eq(ai, "127.0.0.1:9998", SOCK_STREAM, IPPROTO_TCP);
+ tt_ptr_op(ai->ai_next, ==, NULL);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ hints.ai_flags = 0; /* as if for connect */
+ hints.ai_family = PF_INET6;
+ r = evutil_getaddrinfo(NULL, "9997", &hints, &ai);
+ tt_assert(ai);
+ tt_int_op(r,==,0);
+ tt_ptr_op(ai->ai_next, ==, NULL);
+ test_ai_eq(ai, "[::1]:9997", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ hints.ai_flags = EVUTIL_AI_PASSIVE; /* as if for bind. */
+ hints.ai_family = PF_INET6;
+ r = evutil_getaddrinfo(NULL, "9996", &hints, &ai);
+ tt_assert(ai);
+ tt_int_op(r,==,0);
+ tt_ptr_op(ai->ai_next, ==, NULL);
+ test_ai_eq(ai, "[::]:9996", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ /* Now try an unspec one. We should get a v6 and a v4. */
+ hints.ai_family = PF_UNSPEC;
+ r = evutil_getaddrinfo(NULL, "9996", &hints, &ai);
+ tt_assert(ai);
+ tt_int_op(r,==,0);
+ a = ai_find_by_family(ai, PF_INET6);
+ tt_assert(a);
+ test_ai_eq(a, "[::]:9996", SOCK_STREAM, IPPROTO_TCP);
+ a = ai_find_by_family(ai, PF_INET);
+ tt_assert(a);
+ test_ai_eq(a, "0.0.0.0:9996", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ /* Try out AI_NUMERICHOST: successful case. Also try
+ * multiprotocol. */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags = EVUTIL_AI_NUMERICHOST;
+ r = evutil_getaddrinfo("1.2.3.4", NULL, &hints, &ai);
+ tt_int_op(r, ==, 0);
+ a = ai_find_by_protocol(ai, IPPROTO_TCP);
+ tt_assert(a);
+ test_ai_eq(a, "1.2.3.4", SOCK_STREAM, IPPROTO_TCP);
+ a = ai_find_by_protocol(ai, IPPROTO_UDP);
+ tt_assert(a);
+ test_ai_eq(a, "1.2.3.4", SOCK_DGRAM, IPPROTO_UDP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+
+ /* Try the failing case of AI_NUMERICHOST */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags = EVUTIL_AI_NUMERICHOST;
+ r = evutil_getaddrinfo("www.google.com", "80", &hints, &ai);
+ tt_int_op(r, ==, EVUTIL_EAI_NONAME);
+ tt_ptr_op(ai, ==, NULL);
+
+ /* Try symbolic service names with AI_NUMERICSERV */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = EVUTIL_AI_NUMERICSERV;
+ r = evutil_getaddrinfo("1.2.3.4", "http", &hints, &ai);
+ tt_int_op(r,==,EVUTIL_EAI_NONAME);
+
+ /* Try symbolic service names */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evutil_getaddrinfo("1.2.3.4", "http", &hints, &ai);
+ if (r!=0) {
+ TT_DECLARE("SKIP", ("Symbolic service names seem broken."));
+ } else {
+ tt_assert(ai);
+ test_ai_eq(ai, "1.2.3.4:80", SOCK_STREAM, IPPROTO_TCP);
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+ }
+
+end:
+ if (ai)
+ evutil_freeaddrinfo(ai);
+}
+
+static void
+test_evutil_getaddrinfo_live(void *arg)
+{
+ struct evutil_addrinfo *ai = NULL;
+ struct evutil_addrinfo hints;
+
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ char buf[128];
+ const char *cp;
+ int r;
+
+ /* Now do some actual lookups. */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_INET;
+ hints.ai_protocol = IPPROTO_TCP;
+ hints.ai_socktype = SOCK_STREAM;
+ r = evutil_getaddrinfo("www.google.com", "80", &hints, &ai);
+ if (r != 0) {
+ TT_DECLARE("SKIP", ("Couldn't resolve www.google.com"));
+ } else {
+ tt_assert(ai);
+ tt_int_op(ai->ai_family, ==, PF_INET);
+ tt_int_op(ai->ai_protocol, ==, IPPROTO_TCP);
+ tt_int_op(ai->ai_socktype, ==, SOCK_STREAM);
+ tt_int_op(ai->ai_addrlen, ==, sizeof(struct sockaddr_in));
+ sin = (struct sockaddr_in*)ai->ai_addr;
+ tt_int_op(sin->sin_family, ==, AF_INET);
+ tt_int_op(sin->sin_port, ==, htons(80));
+ tt_int_op(sin->sin_addr.s_addr, !=, 0xffffffff);
+
+ cp = evutil_inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
+ TT_BLATHER(("www.google.com resolved to %s",
+ cp?cp:"<unwriteable>"));
+ evutil_freeaddrinfo(ai);
+ ai = NULL;
+ }
+
+ hints.ai_family = PF_INET6;
+ r = evutil_getaddrinfo("ipv6.google.com", "80", &hints, &ai);
+ if (r != 0) {
+ TT_BLATHER(("Couldn't do an ipv6 lookup for ipv6.google.com"));
+ } else {
+ tt_assert(ai);
+ tt_int_op(ai->ai_family, ==, PF_INET6);
+ tt_int_op(ai->ai_addrlen, ==, sizeof(struct sockaddr_in6));
+ sin6 = (struct sockaddr_in6*)ai->ai_addr;
+ tt_int_op(sin6->sin6_port, ==, htons(80));
+
+ cp = evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, buf,
+ sizeof(buf));
+ TT_BLATHER(("ipv6.google.com resolved to %s",
+ cp?cp:"<unwriteable>"));
+ }
+
+end:
+ if (ai)
+ evutil_freeaddrinfo(ai);
+}
+
+#ifdef _WIN32
+static void
+test_evutil_loadsyslib(void *arg)
+{
+ HMODULE h=NULL;
+
+ h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
+ tt_assert(h);
+
+end:
+ if (h)
+ CloseHandle(h);
+
+}
+#endif
+
+/** Test mm_malloc(). */
+static void
+test_event_malloc(void *arg)
+{
+ void *p = NULL;
+ (void)arg;
+
+ /* mm_malloc(0) should simply return NULL. */
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+ errno = 0;
+ p = mm_malloc(0);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, 0);
+#endif
+
+ /* Trivial case. */
+ errno = 0;
+ p = mm_malloc(8);
+ tt_assert(p != NULL);
+ tt_int_op(errno, ==, 0);
+ mm_free(p);
+
+ end:
+ errno = 0;
+ return;
+}
+
+static void
+test_event_calloc(void *arg)
+{
+ void *p = NULL;
+ (void)arg;
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+ /* mm_calloc() should simply return NULL
+ * if either argument is zero. */
+ errno = 0;
+ p = mm_calloc(0, 0);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, 0);
+ errno = 0;
+ p = mm_calloc(0, 1);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, 0);
+ errno = 0;
+ p = mm_calloc(1, 0);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, 0);
+#endif
+
+ /* Trivial case. */
+ errno = 0;
+ p = mm_calloc(8, 8);
+ tt_assert(p != NULL);
+ tt_int_op(errno, ==, 0);
+ mm_free(p);
+ p = NULL;
+
+ /* mm_calloc() should set errno = ENOMEM and return NULL
+ * in case of potential overflow. */
+ errno = 0;
+ p = mm_calloc(EV_SIZE_MAX/2, EV_SIZE_MAX/2 + 8);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, ENOMEM);
+
+ end:
+ errno = 0;
+ if (p)
+ mm_free(p);
+
+ return;
+}
+
+static void
+test_event_strdup(void *arg)
+{
+ void *p = NULL;
+ (void)arg;
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+ /* mm_strdup(NULL) should set errno = EINVAL and return NULL. */
+ errno = 0;
+ p = mm_strdup(NULL);
+ tt_assert(p == NULL);
+ tt_int_op(errno, ==, EINVAL);
+#endif
+
+ /* Trivial cases. */
+
+ errno = 0;
+ p = mm_strdup("");
+ tt_assert(p != NULL);
+ tt_int_op(errno, ==, 0);
+ tt_str_op(p, ==, "");
+ mm_free(p);
+
+ errno = 0;
+ p = mm_strdup("foo");
+ tt_assert(p != NULL);
+ tt_int_op(errno, ==, 0);
+ tt_str_op(p, ==, "foo");
+ mm_free(p);
+
+ /* XXX
+ * mm_strdup(str) where str is a string of length EV_SIZE_MAX
+ * should set errno = ENOMEM and return NULL. */
+
+ end:
+ errno = 0;
+ return;
+}
+
+static void
+test_evutil_usleep(void *arg)
+{
+ struct timeval tv1, tv2, tv3, diff1, diff2;
+ const struct timeval quarter_sec = {0, 250*1000};
+ const struct timeval tenth_sec = {0, 100*1000};
+ long usec1, usec2;
+
+ evutil_gettimeofday(&tv1, NULL);
+ evutil_usleep_(&quarter_sec);
+ evutil_gettimeofday(&tv2, NULL);
+ evutil_usleep_(&tenth_sec);
+ evutil_gettimeofday(&tv3, NULL);
+
+ evutil_timersub(&tv2, &tv1, &diff1);
+ evutil_timersub(&tv3, &tv2, &diff2);
+ usec1 = diff1.tv_sec * 1000000 + diff1.tv_usec;
+ usec2 = diff2.tv_sec * 1000000 + diff2.tv_usec;
+
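+ /* Allow generous scheduling slack: the 0.25s sleep must land in
+ * [0.2s, 0.3s], and the 0.1s sleep in [0.08s, 0.12s]. */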
+ tt_int_op(usec1, >, 200000);
+ tt_int_op(usec1, <, 300000);
+ tt_int_op(usec2, >, 80000);
+ tt_int_op(usec2, <, 120000);
+
+end:
+ ;
+}
+
+static void
+test_evutil_monotonic_res(void *data_)
+{
+ /* Basic sanity test for monotonic timers. What we'd really like
+ * to do is make sure that they can't go backwards even when the
+ * system clock goes backwards. But we haven't got a good way to
+ * move the system clock backwards.
+ */
+ struct basic_test_data *data = data_;
+ struct evutil_monotonic_timer timer;
+ const int precise = strstr(data->setup_data, "precise") != NULL;
+ const int fallback = strstr(data->setup_data, "fallback") != NULL;
+ struct timeval tv[10], delay;
+ int total_diff = 0;
+
+ int flags = 0, wantres, acceptdiff, i;
+ if (precise)
+ flags |= EV_MONOT_PRECISE;
+ if (fallback)
+ flags |= EV_MONOT_FALLBACK;
+ if (precise || fallback) {
+#ifdef _WIN32
+ wantres = 10*1000;
+ acceptdiff = 1000;
+#else
+ wantres = 1000;
+ acceptdiff = 300;
+#endif
+ } else {
+ wantres = 40*1000;
+ acceptdiff = 20*1000;
+ }
+
+ TT_BLATHER(("Precise = %d", precise));
+ TT_BLATHER(("Fallback = %d", fallback));
+
+ /* First, make sure we match up with usleep. */
+
+ delay.tv_sec = 0;
+ delay.tv_usec = wantres;
+
+ tt_int_op(evutil_configure_monotonic_time_(&timer, flags), ==, 0);
+
+ for (i = 0; i < 10; ++i) {
+ evutil_gettime_monotonic_(&timer, &tv[i]);
+ evutil_usleep_(&delay);
+ }
+
+ for (i = 0; i < 9; ++i) {
+ struct timeval diff;
+ tt_assert(evutil_timercmp(&tv[i], &tv[i+1], <));
+ evutil_timersub(&tv[i+1], &tv[i], &diff);
+ tt_int_op(diff.tv_sec, ==, 0);
+ total_diff += diff.tv_usec;
+ TT_BLATHER(("Difference = %d", (int)diff.tv_usec));
+ }
+ tt_int_op(abs(total_diff/9 - wantres), <, acceptdiff);
+
+end:
+ ;
+}
+
+static void
+test_evutil_monotonic_prc(void *data_)
+{
+ struct basic_test_data *data = data_;
+ struct evutil_monotonic_timer timer;
+ const int precise = strstr(data->setup_data, "precise") != NULL;
+ const int fallback = strstr(data->setup_data, "fallback") != NULL;
+ struct timeval tv[10];
+ int total_diff = 0;
+ int i, maxstep = 25*1000,flags=0;
+ if (precise)
+ maxstep = 500;
+ if (precise)
+ flags |= EV_MONOT_PRECISE;
+ if (fallback)
+ flags |= EV_MONOT_FALLBACK;
+ tt_int_op(evutil_configure_monotonic_time_(&timer, flags), ==, 0);
+
+ /* find out what precision we actually see. */
+
+ evutil_gettime_monotonic_(&timer, &tv[0]);
+ for (i = 1; i < 10; ++i) {
+ do {
+ evutil_gettime_monotonic_(&timer, &tv[i]);
+ } while (evutil_timercmp(&tv[i-1], &tv[i], ==));
+ }
+
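+ /* Average the nine observed clock steps; the mean step must stay
+ * below maxstep for the requested precision. */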
+ total_diff = 0;
+ for (i = 0; i < 9; ++i) {
+ struct timeval diff;
+ tt_assert(evutil_timercmp(&tv[i], &tv[i+1], <));
+ evutil_timersub(&tv[i+1], &tv[i], &diff);
+ tt_int_op(diff.tv_sec, ==, 0);
+ total_diff += diff.tv_usec;
+ TT_BLATHER(("Step difference = %d", (int)diff.tv_usec));
+ }
+ TT_BLATHER(("Average step difference = %d", total_diff / 9));
+ tt_int_op(total_diff/9, <, maxstep);
+
+end:
+ ;
+}
+
+struct testcase_t util_testcases[] = {
+ { "ipv4_parse", regress_ipv4_parse, 0, NULL, NULL },
+ { "ipv6_parse", regress_ipv6_parse, 0, NULL, NULL },
+ { "sockaddr_port_parse", regress_sockaddr_port_parse, 0, NULL, NULL },
+ { "sockaddr_port_format", regress_sockaddr_port_format, 0, NULL, NULL },
+ { "sockaddr_predicates", test_evutil_sockaddr_predicates, 0,NULL,NULL },
+ { "evutil_snprintf", test_evutil_snprintf, 0, NULL, NULL },
+ { "evutil_strtoll", test_evutil_strtoll, 0, NULL, NULL },
+ { "evutil_casecmp", test_evutil_casecmp, 0, NULL, NULL },
+ { "evutil_rtrim", test_evutil_rtrim, 0, NULL, NULL },
+ { "strlcpy", test_evutil_strlcpy, 0, NULL, NULL },
+ { "log", test_evutil_log, TT_FORK, NULL, NULL },
+ { "upcast", test_evutil_upcast, 0, NULL, NULL },
+ { "integers", test_evutil_integers, 0, NULL, NULL },
+ { "rand", test_evutil_rand, TT_FORK, NULL, NULL },
+ { "getaddrinfo", test_evutil_getaddrinfo, TT_FORK, NULL, NULL },
+ { "getaddrinfo_live", test_evutil_getaddrinfo_live, TT_FORK|TT_OFF_BY_DEFAULT, NULL, NULL },
+#ifdef _WIN32
+ { "loadsyslib", test_evutil_loadsyslib, TT_FORK, NULL, NULL },
+#endif
+ { "mm_malloc", test_event_malloc, 0, NULL, NULL },
+ { "mm_calloc", test_event_calloc, 0, NULL, NULL },
+ { "mm_strdup", test_event_strdup, 0, NULL, NULL },
+ { "usleep", test_evutil_usleep, 0, NULL, NULL },
+ { "monotonic_res", test_evutil_monotonic_res, 0, &basic_setup, (void*)"" },
+ { "monotonic_res_precise", test_evutil_monotonic_res, TT_OFF_BY_DEFAULT, &basic_setup, (void*)"precise" },
+ { "monotonic_res_fallback", test_evutil_monotonic_res, TT_OFF_BY_DEFAULT, &basic_setup, (void*)"fallback" },
+ { "monotonic_prc", test_evutil_monotonic_prc, 0, &basic_setup, (void*)"" },
+ { "monotonic_prc_precise", test_evutil_monotonic_prc, 0, &basic_setup, (void*)"precise" },
+ { "monotonic_prc_fallback", test_evutil_monotonic_prc, 0, &basic_setup, (void*)"fallback" },
+ END_OF_TESTCASES,
+};
+
diff --git a/libs/libevent/docs/test/regress_zlib.c b/libs/libevent/docs/test/regress_zlib.c
new file mode 100644
index 0000000000..8406676932
--- /dev/null
+++ b/libs/libevent/docs/test/regress_zlib.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* The old tests here need assertions to work. */
+#undef NDEBUG
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <assert.h>
+#include <errno.h>
+
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+
+#include "regress.h"
+#include "mm-internal.h"
+
+/* zlib 1.2.4 and 1.2.5 do some "clever" things with macros. Instead of
+ saying "(defined(FOO) ? FOO : 0)" they like to say "FOO-0", on the theory
+ that nobody will care if the compile outputs a no-such-identifier warning.
+
+ Sorry, but we like -Werror over here, so I guess we need to define these.
+ I hope that zlib 1.2.6 doesn't break these too.
+*/
+#ifndef _LARGEFILE64_SOURCE
+#define _LARGEFILE64_SOURCE 0
+#endif
+#ifndef _LFS64_LARGEFILE
+#define _LFS64_LARGEFILE 0
+#endif
+#ifndef _FILE_OFFSET_BITS
+#define _FILE_OFFSET_BITS 0
+#endif
+#ifndef off64_t
+#define off64_t ev_int64_t
+#endif
+
+#include <zlib.h>
+
+static int infilter_calls;
+static int outfilter_calls;
+static int readcb_finished;
+static int writecb_finished;
+static int errorcb_invoked;
+
+/*
+ * Zlib filters
+ */
+
+static void
+zlib_deflate_free(void *ctx)
+{
+ z_streamp p = ctx;
+
+ assert(deflateEnd(p) == Z_OK);
+ mm_free(p);
+}
+
+static void
+zlib_inflate_free(void *ctx)
+{
+ z_streamp p = ctx;
+
+ assert(inflateEnd(p) == Z_OK);
+ mm_free(p);
+}
+
+static int
+getstate(enum bufferevent_flush_mode state)
+{
+ switch (state) {
+ case BEV_FINISHED:
+ return Z_FINISH;
+ case BEV_FLUSH:
+ return Z_SYNC_FLUSH;
+ case BEV_NORMAL:
+ default:
+ return Z_NO_FLUSH;
+ }
+}
+
+/*
+ * The input filter is triggered only on new input read from the network.
+ * That means all input data needs to be consumed or the filter needs to
+ * initiate its own triggering via a timeout.
+ */
+static enum bufferevent_filter_result
+zlib_input_filter(struct evbuffer *src, struct evbuffer *dst,
+ ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
+{
+ struct evbuffer_iovec v_in[1];
+ struct evbuffer_iovec v_out[1];
+ int nread, nwrite;
+ int res, n;
+
+ z_streamp p = ctx;
+
+ do {
+ /* let's do some decompression */
+ n = evbuffer_peek(src, -1, NULL, v_in, 1);
+ if (n) {
+ p->avail_in = v_in[0].iov_len;
+ p->next_in = v_in[0].iov_base;
+ } else {
+ p->avail_in = 0;
+ p->next_in = 0;
+ }
+
+ evbuffer_reserve_space(dst, 4096, v_out, 1);
+ p->next_out = v_out[0].iov_base;
+ p->avail_out = v_out[0].iov_len;
+
+ /* we need to flush zlib if we got a flush */
+ res = inflate(p, getstate(state));
+
+ /* let's figure out how much input was consumed and how much output was produced */
+ nread = v_in[0].iov_len - p->avail_in;
+ nwrite = v_out[0].iov_len - p->avail_out;
+
+ evbuffer_drain(src, nread);
+ v_out[0].iov_len = nwrite;
+ evbuffer_commit_space(dst, v_out, 1);
+
+ if (res==Z_BUF_ERROR) {
+ /* We're out of space, or out of decodable input.
+ Assume the latter only if nwrite == 0.
+ */
+ if (nwrite == 0)
+ return BEV_NEED_MORE;
+ } else {
+ assert(res == Z_OK || res == Z_STREAM_END);
+ }
+
+ } while (evbuffer_get_length(src) > 0);
+
+ ++infilter_calls;
+
+ return (BEV_OK);
+}
+
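+/* The output filter mirrors the input filter above, but runs deflate()
+ * over data queued for writing instead of inflate() over data read from
+ * the network. */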
+static enum bufferevent_filter_result
+zlib_output_filter(struct evbuffer *src, struct evbuffer *dst,
+ ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
+{
+ struct evbuffer_iovec v_in[1];
+ struct evbuffer_iovec v_out[1];
+ int nread, nwrite;
+ int res, n;
+
+ z_streamp p = ctx;
+
+ do {
+ /* let's do some compression */
+ n = evbuffer_peek(src, -1, NULL, v_in, 1);
+ if (n) {
+ p->avail_in = v_in[0].iov_len;
+ p->next_in = v_in[0].iov_base;
+ } else {
+ p->avail_in = 0;
+ p->next_in = 0;
+ }
+
+ evbuffer_reserve_space(dst, 4096, v_out, 1);
+ p->next_out = v_out[0].iov_base;
+ p->avail_out = v_out[0].iov_len;
+
+ /* we need to flush zlib if we got a flush */
+ res = deflate(p, getstate(state));
+
+ /* let's figure out how much input was consumed and how much output was produced */
+ nread = v_in[0].iov_len - p->avail_in;
+ nwrite = v_out[0].iov_len - p->avail_out;
+
+ evbuffer_drain(src, nread);
+ v_out[0].iov_len = nwrite;
+ evbuffer_commit_space(dst, v_out, 1);
+
+ if (res==Z_BUF_ERROR) {
+ /* We're out of space, or out of input to compress.
+ Assume the latter only if nwrite == 0.
+ */
+ if (nwrite == 0)
+ return BEV_NEED_MORE;
+ } else {
+ assert(res == Z_OK || res == Z_STREAM_END);
+ }
+
+ } while (evbuffer_get_length(src) > 0);
+
+ ++outfilter_calls;
+
+ return (BEV_OK);
+}
+
+/*
+ * simple bufferevent test (over transparent zlib treatment)
+ */
+
+static void
+readcb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bufferevent_get_input(bev)) == 8333) {
+ struct evbuffer *evbuf = evbuffer_new();
+ assert(evbuf != NULL);
+
+ /* gratuitous test of bufferevent_read_buffer */
+ bufferevent_read_buffer(bev, evbuf);
+
+ bufferevent_disable(bev, EV_READ);
+
+ if (evbuffer_get_length(evbuf) == 8333) {
+ ++readcb_finished;
+ }
+
+ evbuffer_free(evbuf);
+ }
+}
+
+static void
+writecb(struct bufferevent *bev, void *arg)
+{
+ if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
+ ++writecb_finished;
+ }
+}
+
+static void
+errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ errorcb_invoked = 1;
+}
+
+void
+test_bufferevent_zlib(void *arg)
+{
+ struct bufferevent *bev1=NULL, *bev2=NULL;
+ char buffer[8333];
+ z_stream *z_input, *z_output;
+ int i, r;
+ evutil_socket_t pair[2] = {-1, -1};
+ (void)arg;
+
+ infilter_calls = outfilter_calls = readcb_finished = writecb_finished
+ = errorcb_invoked = 0;
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+ tt_abort_perror("socketpair");
+ }
+
+ evutil_make_socket_nonblocking(pair[0]);
+ evutil_make_socket_nonblocking(pair[1]);
+
+ bev1 = bufferevent_socket_new(NULL, pair[0], 0);
+ bev2 = bufferevent_socket_new(NULL, pair[1], 0);
+
+ z_output = mm_calloc(sizeof(*z_output), 1);
+ r = deflateInit(z_output, Z_DEFAULT_COMPRESSION);
+ tt_int_op(r, ==, Z_OK);
+ z_input = mm_calloc(sizeof(*z_input), 1);
+ r = inflateInit(z_input);
+ tt_int_op(r, ==, Z_OK);
+
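+ /* bev1 gets an output filter only, so everything written to it is
+ * compressed on the way out; bev2 gets an input filter only, so
+ * everything it reads is decompressed on the way in. */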
+ /* initialize filters */
+ bev1 = bufferevent_filter_new(bev1, NULL, zlib_output_filter,
+ BEV_OPT_CLOSE_ON_FREE, zlib_deflate_free, z_output);
+ bev2 = bufferevent_filter_new(bev2, zlib_input_filter,
+ NULL, BEV_OPT_CLOSE_ON_FREE, zlib_inflate_free, z_input);
+ bufferevent_setcb(bev1, readcb, writecb, errorcb, NULL);
+ bufferevent_setcb(bev2, readcb, writecb, errorcb, NULL);
+
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev1, EV_WRITE);
+
+ bufferevent_enable(bev2, EV_READ);
+
+ for (i = 0; i < (int)sizeof(buffer); i++)
+ buffer[i] = i;
+
+ /* break it up into multiple buffer chains */
+ bufferevent_write(bev1, buffer, 1800);
+ bufferevent_write(bev1, buffer + 1800, sizeof(buffer) - 1800);
+
+ /* we are done writing - we need to flush everything */
+ bufferevent_flush(bev1, EV_WRITE, BEV_FINISHED);
+
+ event_dispatch();
+
+ tt_want(infilter_calls);
+ tt_want(outfilter_calls);
+ tt_want(readcb_finished);
+ tt_want(writecb_finished);
+ tt_want(!errorcb_invoked);
+
+ test_ok = 1;
+end:
+ if (bev1)
+ bufferevent_free(bev1);
+ if (bev2)
+ bufferevent_free(bev2);
+
+ if (pair[0] >= 0)
+ evutil_closesocket(pair[0]);
+ if (pair[1] >= 0)
+ evutil_closesocket(pair[1]);
+}
diff --git a/libs/libevent/docs/test/rpcgen_wrapper.sh b/libs/libevent/docs/test/rpcgen_wrapper.sh
new file mode 100644
index 0000000000..aaa03031a1
--- /dev/null
+++ b/libs/libevent/docs/test/rpcgen_wrapper.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+# libevent rpcgen_wrapper.sh
+# Transforms event_rpcgen.py failure into success for make, only if
+# regress.gen.c and regress.gen.h already exist in $srcdir. This
+# is needed for "make distcheck" to pass the read-only $srcdir build,
+# as with read-only sources fresh from tarball, regress.gen.[ch] will
+# be correct in $srcdir but unwritable. This previously triggered
+# Makefile.am to create stub regress.gen.c and regress.gen.h in the
+# distcheck _build directory, which were then detected as leftover
+# files in the build tree after distclean, breaking distcheck.
+# Note that regress.gen.[ch] are not in fresh git clones, making
+# working Python a requirement for make distcheck of a git tree.
+
+exit_updated() {
+# echo "Updated ${srcdir}/regress.gen.c and ${srcdir}/regress.gen.h"
+ exit 0
+}
+
+exit_reuse() {
+ echo "event_rpcgen.py failed, ${srcdir}/regress.gen.\[ch\] will be reused." >&2
+ exit 0
+}
+
+exit_failed() {
+ echo "Could not generate regress.gen.\[ch\] using event_rpcgen.sh" >&2
+ exit 1
+}
+
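+# Prefer an explicit python2 interpreter when one is available; otherwise
+# fall back to whatever "python" is on the PATH.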
+if [ -x /usr/bin/python2 ] ; then
+ PYTHON2=/usr/bin/python2
+elif [ "x`which python2`" != x ] ; then
+ PYTHON2=python2
+else
+ PYTHON2=python
+fi
+
+srcdir=$1
+srcdir=${srcdir:-.}
+
+${PYTHON2} ${srcdir}/../event_rpcgen.py --quiet ${srcdir}/regress.rpc \
+ test/regress.gen.h test/regress.gen.c
+
+case "$?" in
+ 0)
+ exit_updated
+ ;;
+ *)
+ test -r ${srcdir}/regress.gen.c -a -r ${srcdir}/regress.gen.h && \
+ exit_reuse
+ exit_failed
+ ;;
+esac
diff --git a/libs/libevent/docs/test/test-changelist.c b/libs/libevent/docs/test/test-changelist.c
new file mode 100644
index 0000000000..6e2466d5a5
--- /dev/null
+++ b/libs/libevent/docs/test/test-changelist.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/util.h"
+#include <time.h>
+
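+/* This test repeatedly deletes and re-adds a write event from inside its
+ * own callback.  A backend with a working changelist should coalesce the
+ * redundant changes, so the process stays nearly idle (the check at the
+ * end fails if CPU usage exceeds 50%) until the timeout ends the test. */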
+struct cpu_usage_timer {
+#ifdef _WIN32
+ HANDLE thread;
+ FILETIME usertimeBegin;
+ FILETIME kerneltimeBegin;
+#else
+ clock_t ticksBegin;
+#endif
+ struct timeval timeBegin;
+};
+static void
+start_cpu_usage_timer(struct cpu_usage_timer *timer)
+{
+#ifdef _WIN32
+ int r;
+ FILETIME createtime, exittime;
+ timer->thread = GetCurrentThread();
+ r = GetThreadTimes(timer->thread, &createtime, &exittime,
+ &timer->usertimeBegin, &timer->kerneltimeBegin);
+ if (r==0) printf("GetThreadTimes failed.");
+#else
+ timer->ticksBegin = clock();
+#endif
+
+ evutil_gettimeofday(&timer->timeBegin, NULL);
+}
+#ifdef _WIN32
+static ev_int64_t
+filetime_to_100nsec(const FILETIME *ft)
+{
+ /* Number of 100-nanosecond units */
+ ev_int64_t n = ft->dwHighDateTime;
+ n <<= 32;
+ n += ft->dwLowDateTime;
+ return n;
+}
+static double
+filetime_diff(const FILETIME *ftStart, const FILETIME *ftEnd)
+{
+ ev_int64_t s, e, diff;
+ double r;
+ s = filetime_to_100nsec(ftStart);
+ e = filetime_to_100nsec(ftEnd);
+ diff = e - s;
+ r = (double) diff;
+ return r / 1.0e7;
+}
+#endif
+
+static void
+get_cpu_usage(struct cpu_usage_timer *timer, double *secElapsedOut,
+ double *secUsedOut, double *usageOut)
+{
+#ifdef _WIN32
+ double usertime_seconds, kerneltime_seconds;
+ FILETIME createtime, exittime, usertimeEnd, kerneltimeEnd;
+ int r;
+#else
+ clock_t ticksEnd;
+#endif
+ struct timeval timeEnd, timeDiff;
+ double secondsPassed, secondsUsed;
+
+#ifdef _WIN32
+ r = GetThreadTimes(timer->thread, &createtime, &exittime,
+ &usertimeEnd, &kerneltimeEnd);
+ if (r==0) printf("GetThreadTimes failed.");
+ usertime_seconds = filetime_diff(&timer->usertimeBegin, &usertimeEnd);
+ kerneltime_seconds = filetime_diff(&timer->kerneltimeBegin, &kerneltimeEnd);
+ secondsUsed = kerneltime_seconds + usertime_seconds;
+#else
+ ticksEnd = clock();
+ secondsUsed = (ticksEnd - timer->ticksBegin) / (double)CLOCKS_PER_SEC;
+#endif
+ evutil_gettimeofday(&timeEnd, NULL);
+ evutil_timersub(&timeEnd, &timer->timeBegin, &timeDiff);
+ secondsPassed = timeDiff.tv_sec + (timeDiff.tv_usec / 1.0e6);
+
+ *secElapsedOut = secondsPassed;
+ *secUsedOut = secondsUsed;
+ *usageOut = secondsUsed / secondsPassed;
+}
+
+static void
+write_cb(evutil_socket_t fd, short event, void *arg)
+{
+ printf("write callback. should only see this once\n");
+
+ /* got what we want, remove the event */
+ event_del(*(struct event**)arg);
+
+ /* oops, changed my mind, add it back again */
+ event_add(*(struct event**)arg,NULL);
+
+ /* not a good day for decisiveness, I really didn't want it after all */
+ event_del(*(struct event**)arg);
+
+}
+
+static void
+timeout_cb(evutil_socket_t fd, short event, void *arg)
+{
+ printf("timeout fired, time to end test\n");
+ event_del(*(struct event**)arg);
+ return;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct event* ev;
+ struct event* timeout;
+ struct event_base* base;
+
+ evutil_socket_t pair[2];
+ struct timeval tv;
+ struct cpu_usage_timer timer;
+
+ double usage, secPassed, secUsed;
+
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+ /* Initialize the event library */
+ if (!(base = event_base_new()))
+ return (1);
+
+ /* Initialize a timeout to terminate the test */
+ timeout = evtimer_new(base,timeout_cb,&timeout);
+ /* and watch for writability on one end of the pipe */
+ ev = event_new(base,pair[1],EV_WRITE | EV_PERSIST, write_cb, &ev);
+
+ tv.tv_sec = 1;
+ tv.tv_usec = 500*1000;
+
+ evtimer_add(timeout, &tv);
+
+ event_add(ev, NULL);
+
+ start_cpu_usage_timer(&timer);
+
+ event_base_dispatch(base);
+
+ event_free(ev);
+ event_free(timeout);
+ event_base_free(base);
+
+ get_cpu_usage(&timer, &secPassed, &secUsed, &usage);
+
+ /* Our CPU usage over the course of the test should be
+ virtually nil. */
+
+ printf("usec used=%d, usec passed=%d, cpu usage=%.2f%%\n",
+ (int)(secUsed*1e6),
+ (int)(secPassed*1e6),
+ usage*100);
+
+ if (usage > 50.0) /* way too high */
+ return 1;
+
+ return 0;
+}
+
diff --git a/libs/libevent/docs/test/test-closed.c b/libs/libevent/docs/test/test-closed.c
new file mode 100644
index 0000000000..5b04f354ac
--- /dev/null
+++ b/libs/libevent/docs/test/test-closed.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2013 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#else
+#include <unistd.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+struct timeval timeout = {3, 0};
+
+static void
+closed_cb(evutil_socket_t fd, short event, void *arg)
+{
+ if (EV_TIMEOUT & event) {
+ printf("%s: Timeout!\n", __func__);
+ exit(1);
+ }
+
+ if (EV_CLOSED & event) {
+ printf("%s: detected socket close with success\n", __func__);
+ return;
+ }
+
+ printf("%s: unable to detect socket close\n", __func__);
+ exit(1);
+}
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+int
+main(int argc, char **argv)
+{
+ struct event_base *base;
+ struct event_config *cfg;
+ struct event *ev;
+ const char *test = "test string";
+ evutil_socket_t pair[2];
+
+ /* Initialize the library and check if the backend
+ supports EV_FEATURE_EARLY_CLOSE
+ */
+ cfg = event_config_new();
+ event_config_require_features(cfg, EV_FEATURE_EARLY_CLOSE);
+ base = event_base_new_with_config(cfg);
+ event_config_free(cfg);
+ if (!base) {
+ /* Backend doesn't support EV_FEATURE_EARLY_CLOSE */
+ return 0;
+ }
+
+ /* Create a pair of sockets */
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+ /* Send some data on socket 0 and immediately close it */
+ if (send(pair[0], test, (int)strlen(test)+1, 0) < 0)
+ return (1);
+ shutdown(pair[0], SHUT_WR);
+
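+ /* pair[1] should report EV_CLOSED once the peer shuts down its
+ * write side, even though unread data is still pending. */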
+ /* Dispatch */
+ ev = event_new(base, pair[1], EV_CLOSED | EV_TIMEOUT, closed_cb, event_self_cbarg());
+ event_add(ev, &timeout);
+ event_base_dispatch(base);
+
+ /* Finalize library */
+ event_base_free(base);
+ return 0;
+}
+
diff --git a/libs/libevent/docs/test/test-dumpevents.c b/libs/libevent/docs/test/test-dumpevents.c
new file mode 100644
index 0000000000..1c272d4c01
--- /dev/null
+++ b/libs/libevent/docs/test/test-dumpevents.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "util-internal.h"
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+#include <stdio.h>
+#include <event2/event.h>
+#include <signal.h>
+
+static void
+sock_perror(const char *s)
+{
+#ifdef _WIN32
+ const char *err = evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR());
+ fprintf(stderr, "%s: %s\n", s, err);
+#else
+ perror(s);
+#endif
+}
+
+static void
+callback1(evutil_socket_t fd, short events, void *arg)
+{
+}
+static void
+callback2(evutil_socket_t fd, short events, void *arg)
+{
+}
+
+/* Testing code for event_base_dump_events().
+
+ Note that just because we have code to exercise this function
+ doesn't mean that *ANYTHING* about the output format is guaranteed
+ to remain the same in the future.
+ */
+int
+main(int argc, char **argv)
+{
+#define N_EVENTS 13
+ int i;
+ struct event *ev[N_EVENTS];
+ evutil_socket_t pair1[2];
+ evutil_socket_t pair2[2];
+ struct timeval tv_onesec = {1,0};
+ struct timeval tv_two5sec = {2,500*1000};
+ const struct timeval *tv_onesec_common;
+ const struct timeval *tv_two5sec_common;
+ struct event_base *base;
+ struct timeval now;
+
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+#ifdef _WIN32
+#define LOCAL_SOCKETPAIR_AF AF_INET
+#else
+#define LOCAL_SOCKETPAIR_AF AF_UNIX
+#endif
+
+ if (evutil_make_internal_pipe_(pair1) < 0 ||
+ evutil_make_internal_pipe_(pair2) < 0) {
+ sock_perror("evutil_make_internal_pipe_");
+ return 1;
+ }
+
+ if (!(base = event_base_new())) {
+ fprintf(stderr,"Couldn't make event_base\n");
+ return 2;
+ }
+
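+ /* Events added with the timeval returned by
+ * event_base_init_common_timeout() share a single timeout queue;
+ * two different durations are set up here so both show up in the
+ * dump below. */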
+ tv_onesec_common = event_base_init_common_timeout(base, &tv_onesec);
+ tv_two5sec_common = event_base_init_common_timeout(base, &tv_two5sec);
+
+ ev[0] = event_new(base, pair1[0], EV_WRITE, callback1, NULL);
+ ev[1] = event_new(base, pair1[1], EV_READ|EV_PERSIST, callback1, NULL);
+ ev[2] = event_new(base, pair2[0], EV_WRITE|EV_PERSIST, callback2, NULL);
+ ev[3] = event_new(base, pair2[1], EV_READ, callback2, NULL);
+
+ /* For timers */
+ ev[4] = evtimer_new(base, callback1, NULL);
+ ev[5] = evtimer_new(base, callback1, NULL);
+ ev[6] = evtimer_new(base, callback1, NULL);
+ ev[7] = event_new(base, -1, EV_PERSIST, callback2, NULL);
+ ev[8] = event_new(base, -1, EV_PERSIST, callback2, NULL);
+ ev[9] = event_new(base, -1, EV_PERSIST, callback2, NULL);
+
+ /* To activate */
+ ev[10] = event_new(base, -1, 0, callback1, NULL);
+ ev[11] = event_new(base, -1, 0, callback2, NULL);
+
+ /* Signals */
+ ev[12] = evsignal_new(base, SIGINT, callback2, NULL);
+
+ event_add(ev[0], NULL);
+ event_add(ev[1], &tv_onesec);
+ event_add(ev[2], tv_onesec_common);
+ event_add(ev[3], tv_two5sec_common);
+
+ event_add(ev[4], tv_onesec_common);
+ event_add(ev[5], tv_onesec_common);
+ event_add(ev[6], &tv_onesec);
+ event_add(ev[7], tv_two5sec_common);
+ event_add(ev[8], tv_onesec_common);
+ event_add(ev[9], &tv_two5sec);
+
+ event_active(ev[10], EV_READ, 1);
+ event_active(ev[11], EV_READ|EV_WRITE|EV_TIMEOUT, 1);
+ event_active(ev[1], EV_READ, 1);
+
+ event_add(ev[12], NULL);
+
+ evutil_gettimeofday(&now,NULL);
+ puts("=====expected");
+ printf("Now= %ld.%06d\n",(long)now.tv_sec,(int)now.tv_usec);
+ puts("Inserted:");
+ printf(" %p [fd %ld] Write\n",ev[0],(long)pair1[0]);
+ printf(" %p [fd %ld] Read Persist Timeout=T+1\n",ev[1],(long)pair1[1]);
+ printf(" %p [fd %ld] Write Persist Timeout=T+1\n",ev[2],(long)pair2[0]);
+ printf(" %p [fd %ld] Read Timeout=T+2.5\n",ev[3],(long)pair2[1]);
+ printf(" %p [fd -1] Timeout=T+1\n",ev[4]);
+ printf(" %p [fd -1] Timeout=T+1\n",ev[5]);
+ printf(" %p [fd -1] Timeout=T+1\n",ev[6]);
+ printf(" %p [fd -1] Persist Timeout=T+2.5\n",ev[7]);
+ printf(" %p [fd -1] Persist Timeout=T+1\n",ev[8]);
+ printf(" %p [fd -1] Persist Timeout=T+2.5\n",ev[9]);
+ printf(" %p [sig %d] Signal Persist\n", ev[12], (int)SIGINT);
+
+ puts("Active:");
+ printf(" %p [fd -1, priority=0] Read active\n", ev[10]);
+ printf(" %p [fd -1, priority=0] Read Write Timeout active\n", ev[11]);
+ printf(" %p [fd %ld, priority=0] Read active\n", ev[1], (long)pair1[1]);
+
+ puts("======received");
+ event_base_dump_events(base, stdout);
+
+ for (i = 0; i < N_EVENTS; ++i) {
+ event_free(ev[i]);
+ }
+ event_base_free(base);
+
+ return 0;
+}
+
diff --git a/libs/libevent/docs/test/test-eof.c b/libs/libevent/docs/test/test-eof.c
new file mode 100644
index 0000000000..a9ca5343a2
--- /dev/null
+++ b/libs/libevent/docs/test/test-eof.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#else
+#include <unistd.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+int test_okay = 1;
+int called = 0;
+struct timeval timeout = {60, 0};
+
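+/* Expected sequence: the first callback reads the test string and re-adds
+ * the event; the second callback sees len == 0 (EOF) and clears test_okay,
+ * which main() then returns as the (successful) exit status. */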
+static void
+read_cb(evutil_socket_t fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ if (EV_TIMEOUT & event) {
+ printf("%s: Timeout!\n", __func__);
+ exit(1);
+ }
+
+ len = recv(fd, buf, sizeof(buf), 0);
+
+ printf("%s: read %d%s\n", __func__,
+ len, len ? "" : " - means EOF");
+
+ if (len) {
+ if (!called)
+ event_add(arg, &timeout);
+ } else if (called == 1)
+ test_okay = 0;
+
+ called++;
+}
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+int
+main(int argc, char **argv)
+{
+ struct event ev;
+ const char *test = "test string";
+ evutil_socket_t pair[2];
+
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+
+ if (send(pair[0], test, (int)strlen(test)+1, 0) < 0)
+ return (1);
+ shutdown(pair[0], SHUT_WR);
+
+ /* Initialize the event library */
+ event_init();
+
+ /* Initialize one event */
+ event_set(&ev, pair[1], EV_READ | EV_TIMEOUT, read_cb, &ev);
+
+ event_add(&ev, &timeout);
+
+ event_dispatch();
+
+ return (test_okay);
+}
+
diff --git a/libs/libevent/docs/test/test-fdleak.c b/libs/libevent/docs/test/test-fdleak.c
new file mode 100644
index 0000000000..4c4eba25e7
--- /dev/null
+++ b/libs/libevent/docs/test/test-fdleak.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2012 Ross Lagerwall <rosslagerwall@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#include "event2/event.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/listener.h"
+
+/* Number of requests to make. Setting this too high might result in the machine
+ running out of ephemeral ports */
+#ifdef _WIN32
+#define MAX_REQUESTS 1000
+#else
+#define MAX_REQUESTS 4000
+#endif
+
+/* Provide storage for the address, both for the server & the clients */
+static struct sockaddr_in saddr;
+
+/* Number of successful requests so far */
+static int num_requests;
+
+static void start_client(struct event_base *base);
+
+static void
+my_perror(const char *s)
+{
+ fprintf(stderr, "%s: %s",
+ s, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
+}
+
+/*
+===============================================
+Server functions
+===============================================
+*/
+
+/* Read a byte from the client and write it back */
+static void
+server_read_cb(struct bufferevent *bev, void *ctx)
+{
+ while (evbuffer_get_length(bufferevent_get_input(bev))) {
+ unsigned char tmp;
+ bufferevent_read(bev, &tmp, 1);
+ bufferevent_write(bev, &tmp, 1);
+ }
+}
+
+/* Wait for an EOF and then free the bufferevent */
+static void
+server_event_cb(struct bufferevent *bev, short events, void *ctx)
+{
+ if (events & BEV_EVENT_ERROR) {
+ my_perror("Error from bufferevent");
+ exit(1);
+ } else if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) {
+ bufferevent_free(bev);
+ }
+}
+
+/* Accept a client socket and set it up for reading & writing */
+static void
+listener_accept_cb(struct evconnlistener *listener, evutil_socket_t sock,
+ struct sockaddr *addr, int len, void *ptr)
+{
+ struct event_base *base = evconnlistener_get_base(listener);
+ struct bufferevent *bev = bufferevent_socket_new(base, sock,
+ BEV_OPT_CLOSE_ON_FREE);
+
+ bufferevent_setcb(bev, server_read_cb, NULL, server_event_cb, NULL);
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+}
+
+/* Start the server listening on a random port and start the first client. */
+static void
+start_loop(void)
+{
+ struct event_base *base;
+ struct evconnlistener *listener;
+ struct sockaddr_storage ss;
+ ev_socklen_t socklen = sizeof(ss);
+ evutil_socket_t fd;
+
+ base = event_base_new();
+ if (base == NULL) {
+ puts("Could not open event base!");
+ exit(1);
+ }
+
+ listener = evconnlistener_new_bind(base, listener_accept_cb, NULL,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE,
+ -1, (struct sockaddr *)&saddr, sizeof(saddr));
+ if (listener == NULL) {
+ my_perror("Could not create listener!");
+ exit(1);
+ }
+ fd = evconnlistener_get_fd(listener);
+ if (fd < 0) {
+ puts("Couldn't get fd from listener");
+ exit(1);
+ }
+ if (getsockname(fd, (struct sockaddr *)&ss, &socklen) < 0) {
+ my_perror("getsockname()");
+ exit(1);
+ }
+ memcpy(&saddr, &ss, sizeof(saddr));
+ if (saddr.sin_family != AF_INET) {
+ puts("AF mismatch from getsockname().");
+ exit(1);
+ }
+
+ start_client(base);
+
+ event_base_dispatch(base);
+}
+
+/*
+===============================================
+Client functions
+===============================================
+*/
+
+/* Check that the server sends back the same byte that the client sent.
+ If MAX_REQUESTS have been reached, exit. Otherwise, start another client. */
+static void
+client_read_cb(struct bufferevent *bev, void *ctx)
+{
+ unsigned char tmp;
+ struct event_base *base = bufferevent_get_base(bev);
+
+ bufferevent_read(bev, &tmp, 1);
+ if (tmp != 'A') {
+ puts("Incorrect data received!");
+ exit(2);
+ }
+ bufferevent_free(bev);
+
+ num_requests++;
+ if (num_requests == MAX_REQUESTS) {
+ event_base_loopbreak(base);
+ } else {
+ start_client(base);
+ }
+}
+
+/* Send a byte to the server. */
+static void
+client_event_cb(struct bufferevent *bev, short events, void *ctx)
+{
+ if (events & BEV_EVENT_CONNECTED) {
+ unsigned char tmp = 'A';
+ bufferevent_write(bev, &tmp, 1);
+ } else if (events & BEV_EVENT_ERROR) {
+ puts("Client socket got error!");
+ exit(2);
+ }
+
+ bufferevent_enable(bev, EV_READ);
+}
+
+/* Open a client socket to connect to localhost on sin */
+static void
+start_client(struct event_base *base)
+{
+ struct bufferevent *bev = bufferevent_socket_new(base, -1,
+ BEV_OPT_CLOSE_ON_FREE);
+ bufferevent_setcb(bev, client_read_cb, NULL, client_event_cb, NULL);
+
+ if (bufferevent_socket_connect(bev, (struct sockaddr *)&saddr,
+ sizeof(saddr)) < 0) {
+ my_perror("Could not connect!");
+ bufferevent_free(bev);
+ exit(2);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+#ifdef EVENT__HAVE_SETRLIMIT
+ /* Set the fd limit to a low value so that any fd leak is caught without
+ making many requests. */
+ struct rlimit rl;
+ rl.rlim_cur = rl.rlim_max = 20;
+ if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
+ my_perror("setrlimit");
+ exit(3);
+ }
+#endif
+
+#ifdef _WIN32
+ WSADATA WSAData;
+ WSAStartup(0x101, &WSAData);
+#endif
+
+ /* Set up an address, used by both client & server. */
+ memset(&saddr, 0, sizeof(saddr));
+ saddr.sin_family = AF_INET;
+ saddr.sin_addr.s_addr = htonl(0x7f000001);
+ saddr.sin_port = 0; /* Tell the implementation to pick a port. */
+
+ start_loop();
+
+ return 0;
+}
+
+/* XXX why does this test cause so much latency sometimes (OSX 10.5)? */
diff --git a/libs/libevent/docs/test/test-init.c b/libs/libevent/docs/test/test-init.c
new file mode 100644
index 0000000000..92fbc6b146
--- /dev/null
+++ b/libs/libevent/docs/test/test-init.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include <event.h>
+
+int
+main(int argc, char **argv)
+{
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ /* Initialize the event library */
+ event_init();
+
+ return (0);
+}
+
diff --git a/libs/libevent/docs/test/test-ratelim.c b/libs/libevent/docs/test/test-ratelim.c
new file mode 100644
index 0000000000..17babfdcbc
--- /dev/null
+++ b/libs/libevent/docs/test/test-ratelim.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../util-internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+# ifdef _XOPEN_SOURCE_EXTENDED
+# include <arpa/inet.h>
+# endif
+#endif
+#include <signal.h>
+
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/event.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "event2/thread.h"
+
+static struct evutil_weakrand_state weakrand_state;
+
+static int cfg_verbose = 0;
+static int cfg_help = 0;
+
+static int cfg_n_connections = 30;
+static int cfg_duration = 5;
+static int cfg_connlimit = 0;
+static int cfg_grouplimit = 0;
+static int cfg_tick_msec = 1000;
+static int cfg_min_share = -1;
+static int cfg_group_drain = 0;
+
+static int cfg_connlimit_tolerance = -1;
+static int cfg_grouplimit_tolerance = -1;
+static int cfg_stddev_tolerance = -1;
+
+#ifdef _WIN32
+static int cfg_enable_iocp = 0;
+#endif
+
+static struct timeval cfg_tick = { 0, 500*1000 };
+
+static struct ev_token_bucket_cfg *conn_bucket_cfg = NULL;
+static struct ev_token_bucket_cfg *group_bucket_cfg = NULL;
+struct bufferevent_rate_limit_group *ratelim_group = NULL;
+static double seconds_per_tick = 0.0;
+
+struct client_state {
+ size_t queued;
+ ev_uint64_t received;
+
+};
+static const struct timeval *ms100_common=NULL;
+
+/* info from check_bucket_levels_cb */
+static int total_n_bev_checks = 0;
+static ev_int64_t total_rbucket_level=0;
+static ev_int64_t total_wbucket_level=0;
+static ev_int64_t total_max_to_read=0;
+static ev_int64_t total_max_to_write=0;
+static ev_int64_t max_bucket_level=EV_INT64_MIN;
+static ev_int64_t min_bucket_level=EV_INT64_MAX;
+
+/* from check_group_bucket_levels_cb */
+static int total_n_group_bev_checks = 0;
+static ev_int64_t total_group_rbucket_level = 0;
+static ev_int64_t total_group_wbucket_level = 0;
+
+static int n_echo_conns_open = 0;
+
+/* Info on the open connections */
+struct bufferevent **bevs;
+struct client_state *states;
+struct bufferevent_rate_limit_group *group = NULL;
+
+static void check_bucket_levels_cb(evutil_socket_t fd, short events, void *arg);
+
+static void
+loud_writecb(struct bufferevent *bev, void *ctx)
+{
+ struct client_state *cs = ctx;
+ struct evbuffer *output = bufferevent_get_output(bev);
+ char buf[1024];
+ int r = evutil_weakrand_(&weakrand_state);
+ memset(buf, r, sizeof(buf));
+ while (evbuffer_get_length(output) < 8192) {
+ evbuffer_add(output, buf, sizeof(buf));
+ cs->queued += sizeof(buf);
+ }
+}
+
+static void
+discard_readcb(struct bufferevent *bev, void *ctx)
+{
+ struct client_state *cs = ctx;
+ struct evbuffer *input = bufferevent_get_input(bev);
+ size_t len = evbuffer_get_length(input);
+ evbuffer_drain(input, len);
+ cs->received += len;
+}
+
+static void
+write_on_connectedcb(struct bufferevent *bev, short what, void *ctx)
+{
+ if (what & BEV_EVENT_CONNECTED) {
+ loud_writecb(bev, ctx);
+ /* XXXX this shouldn't be needed. */
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+ }
+}
+
+static void
+echo_readcb(struct bufferevent *bev, void *ctx)
+{
+ struct evbuffer *input = bufferevent_get_input(bev);
+ struct evbuffer *output = bufferevent_get_output(bev);
+
+ evbuffer_add_buffer(output, input);
+ if (evbuffer_get_length(output) > 1024000)
+ bufferevent_disable(bev, EV_READ);
+}
+
+static void
+echo_writecb(struct bufferevent *bev, void *ctx)
+{
+ struct evbuffer *output = bufferevent_get_output(bev);
+ if (evbuffer_get_length(output) < 512000)
+ bufferevent_enable(bev, EV_READ);
+}
+
+static void
+echo_eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+ if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
+ --n_echo_conns_open;
+ bufferevent_free(bev);
+ }
+}
+
+static void
+echo_listenercb(struct evconnlistener *listener, evutil_socket_t newsock,
+ struct sockaddr *sourceaddr, int socklen, void *ctx)
+{
+ struct event_base *base = ctx;
+ int flags = BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE;
+ struct bufferevent *bev;
+
+ bev = bufferevent_socket_new(base, newsock, flags);
+ bufferevent_setcb(bev, echo_readcb, echo_writecb, echo_eventcb, NULL);
+ if (conn_bucket_cfg) {
+ struct event *check_event =
+ event_new(base, -1, EV_PERSIST, check_bucket_levels_cb, bev);
+ bufferevent_set_rate_limit(bev, conn_bucket_cfg);
+
+ assert(bufferevent_get_token_bucket_cfg(bev) != NULL);
+ event_add(check_event, ms100_common);
+ }
+ if (ratelim_group)
+ bufferevent_add_to_rate_limit_group(bev, ratelim_group);
+ ++n_echo_conns_open;
+ bufferevent_enable(bev, EV_READ|EV_WRITE);
+}
+
+/* Called periodically to check up on how full the buckets are */
+static void
+check_bucket_levels_cb(evutil_socket_t fd, short events, void *arg)
+{
+ struct bufferevent *bev = arg;
+
+ ev_ssize_t r = bufferevent_get_read_limit(bev);
+ ev_ssize_t w = bufferevent_get_write_limit(bev);
+ ev_ssize_t rm = bufferevent_get_max_to_read(bev);
+ ev_ssize_t wm = bufferevent_get_max_to_write(bev);
+ /* XXXX check that no value is above the configured burst
+ * limit */
+ total_rbucket_level += r;
+ total_wbucket_level += w;
+ total_max_to_read += rm;
+ total_max_to_write += wm;
+#define B(x) \
+ if ((x) > max_bucket_level) \
+ max_bucket_level = (x); \
+ if ((x) < min_bucket_level) \
+ min_bucket_level = (x)
+ B(r);
+ B(w);
+#undef B
+
+ total_n_bev_checks++;
+ if (total_n_bev_checks >= .8 * ((double)cfg_duration / cfg_tick_msec) * cfg_n_connections) {
+ event_free(event_base_get_running_event(bufferevent_get_base(bev)));
+ }
+}
+
+static void
+check_group_bucket_levels_cb(evutil_socket_t fd, short events, void *arg)
+{
+ if (ratelim_group) {
+ ev_ssize_t r = bufferevent_rate_limit_group_get_read_limit(ratelim_group);
+ ev_ssize_t w = bufferevent_rate_limit_group_get_write_limit(ratelim_group);
+ total_group_rbucket_level += r;
+ total_group_wbucket_level += w;
+ }
+ ++total_n_group_bev_checks;
+}
+
+static void
+group_drain_cb(evutil_socket_t fd, short events, void *arg)
+{
+ bufferevent_rate_limit_group_decrement_read(ratelim_group, cfg_group_drain);
+ bufferevent_rate_limit_group_decrement_write(ratelim_group, cfg_group_drain);
+}
+
+static int
+test_ratelimiting(void)
+{
+ struct event_base *base;
+ struct sockaddr_in sin;
+ struct evconnlistener *listener;
+
+ struct sockaddr_storage ss;
+ ev_socklen_t slen;
+
+ int i;
+
+ struct timeval tv;
+
+ ev_uint64_t total_received;
+ double total_sq_persec, total_persec;
+ double variance;
+ double expected_total_persec = -1.0, expected_avg_persec = -1.0;
+ int ok = 1;
+ struct event_config *base_cfg;
+ struct event *periodic_level_check;
+ struct event *group_drain_event=NULL;
+
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
+ sin.sin_port = 0; /* unspecified port */
+
+ if (0)
+ event_enable_debug_mode();
+
+ base_cfg = event_config_new();
+
+#ifdef _WIN32
+ if (cfg_enable_iocp) {
+ evthread_use_windows_threads();
+ event_config_set_flag(base_cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
+ }
+#endif
+
+ base = event_base_new_with_config(base_cfg);
+ event_config_free(base_cfg);
+ if (! base) {
+ fprintf(stderr, "Couldn't create event_base\n");
+ return 1;
+ }
+
+ listener = evconnlistener_new_bind(base, echo_listenercb, base,
+ LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
+ (struct sockaddr *)&sin, sizeof(sin));
+ if (! listener) {
+ fprintf(stderr, "Couldn't create listener\n");
+ return 1;
+ }
+
+ slen = sizeof(ss);
+ if (getsockname(evconnlistener_get_fd(listener), (struct sockaddr *)&ss,
+ &slen) < 0) {
+ perror("getsockname");
+ return 1;
+ }
+
+ if (cfg_connlimit > 0) {
+ conn_bucket_cfg = ev_token_bucket_cfg_new(
+ cfg_connlimit, cfg_connlimit * 4,
+ cfg_connlimit, cfg_connlimit * 4,
+ &cfg_tick);
+ assert(conn_bucket_cfg);
+ }
+
+ if (cfg_grouplimit > 0) {
+ group_bucket_cfg = ev_token_bucket_cfg_new(
+ cfg_grouplimit, cfg_grouplimit * 4,
+ cfg_grouplimit, cfg_grouplimit * 4,
+ &cfg_tick);
+ group = ratelim_group = bufferevent_rate_limit_group_new(
+ base, group_bucket_cfg);
+ expected_total_persec = cfg_grouplimit - (cfg_group_drain / seconds_per_tick);
+ expected_avg_persec = cfg_grouplimit / cfg_n_connections;
+ if (cfg_connlimit > 0 && expected_avg_persec > cfg_connlimit)
+ expected_avg_persec = cfg_connlimit;
+ if (cfg_min_share >= 0)
+ bufferevent_rate_limit_group_set_min_share(
+ ratelim_group, cfg_min_share);
+ }
+
+ if (expected_avg_persec < 0 && cfg_connlimit > 0)
+ expected_avg_persec = cfg_connlimit;
+
+ if (expected_avg_persec > 0)
+ expected_avg_persec /= seconds_per_tick;
+ if (expected_total_persec > 0)
+ expected_total_persec /= seconds_per_tick;
+
+ bevs = calloc(cfg_n_connections, sizeof(struct bufferevent *));
+ states = calloc(cfg_n_connections, sizeof(struct client_state));
+
+ for (i = 0; i < cfg_n_connections; ++i) {
+ bevs[i] = bufferevent_socket_new(base, -1,
+ BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE);
+ assert(bevs[i]);
+ bufferevent_setcb(bevs[i], discard_readcb, loud_writecb,
+ write_on_connectedcb, &states[i]);
+ bufferevent_enable(bevs[i], EV_READ|EV_WRITE);
+ bufferevent_socket_connect(bevs[i], (struct sockaddr *)&ss,
+ slen);
+ }
+
+ tv.tv_sec = cfg_duration - 1;
+ tv.tv_usec = 995000;
+
+ event_base_loopexit(base, &tv);
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 100*1000;
+ ms100_common = event_base_init_common_timeout(base, &tv);
+
+ periodic_level_check = event_new(base, -1, EV_PERSIST, check_group_bucket_levels_cb, NULL);
+ event_add(periodic_level_check, ms100_common);
+
+ if (cfg_group_drain && ratelim_group) {
+ group_drain_event = event_new(base, -1, EV_PERSIST, group_drain_cb, NULL);
+ event_add(group_drain_event, &cfg_tick);
+ }
+
+ event_base_dispatch(base);
+
+ ratelim_group = NULL; /* So no more responders get added */
+ event_free(periodic_level_check);
+ if (group_drain_event)
+ event_del(group_drain_event);
+
+ for (i = 0; i < cfg_n_connections; ++i) {
+ bufferevent_free(bevs[i]);
+ }
+ evconnlistener_free(listener);
+
+ /* Make sure no new echo_conns get added to the group. */
+ ratelim_group = NULL;
+
+ /* This should get _everybody_ freed */
+ while (n_echo_conns_open) {
+ printf("waiting for %d conns\n", n_echo_conns_open);
+ tv.tv_sec = 0;
+ tv.tv_usec = 300000;
+ event_base_loopexit(base, &tv);
+ event_base_dispatch(base);
+ }
+
+ if (group)
+ bufferevent_rate_limit_group_free(group);
+
+ if (total_n_bev_checks) {
+ printf("Average read bucket level: %f\n",
+ (double)total_rbucket_level/total_n_bev_checks);
+ printf("Average write bucket level: %f\n",
+ (double)total_wbucket_level/total_n_bev_checks);
+ printf("Highest bucket level: %f\n",
+ (double)max_bucket_level);
+ printf("Lowest bucket level: %f\n",
+ (double)min_bucket_level);
+ printf("Average max-to-read: %f\n",
+ ((double)total_max_to_read)/total_n_bev_checks);
+ printf("Average max-to-write: %f\n",
+ ((double)total_max_to_write)/total_n_bev_checks);
+ }
+ if (total_n_group_bev_checks) {
+ printf("Average group read bucket level: %f\n",
+ ((double)total_group_rbucket_level)/total_n_group_bev_checks);
+ printf("Average group write bucket level: %f\n",
+ ((double)total_group_wbucket_level)/total_n_group_bev_checks);
+ }
+
+ total_received = 0;
+ total_persec = 0.0;
+ total_sq_persec = 0.0;
+ for (i=0; i < cfg_n_connections; ++i) {
+ double persec = states[i].received;
+ persec /= cfg_duration;
+ total_received += states[i].received;
+ total_persec += persec;
+ total_sq_persec += persec*persec;
+ printf("%d: %f per second\n", i+1, persec);
+ }
+ printf(" total: %f per second\n",
+ ((double)total_received)/cfg_duration);
+ if (expected_total_persec > 0) {
+ double diff = expected_total_persec -
+ ((double)total_received/cfg_duration);
+ printf(" [Off by %lf]\n", diff);
+ if (cfg_grouplimit_tolerance > 0 &&
+ fabs(diff) > cfg_grouplimit_tolerance) {
+ fprintf(stderr, "Group bandwidth out of bounds\n");
+ ok = 0;
+ }
+ }
+
+ printf(" average: %f per second\n",
+ (((double)total_received)/cfg_duration)/cfg_n_connections);
+ if (expected_avg_persec > 0) {
+ double diff = expected_avg_persec - (((double)total_received)/cfg_duration)/cfg_n_connections;
+ printf(" [Off by %lf]\n", diff);
+ if (cfg_connlimit_tolerance > 0 &&
+ fabs(diff) > cfg_connlimit_tolerance) {
+ fprintf(stderr, "Connection bandwidth out of bounds\n");
+ ok = 0;
+ }
+ }
+
+ variance = total_sq_persec/cfg_n_connections - total_persec*total_persec/(cfg_n_connections*cfg_n_connections);
+
+ printf(" stddev: %f per second\n", sqrt(variance));
+ if (cfg_stddev_tolerance > 0 &&
+ sqrt(variance) > cfg_stddev_tolerance) {
+ fprintf(stderr, "Connection variance out of bounds\n");
+ ok = 0;
+ }
+
+ event_base_free(base);
+ free(bevs);
+ free(states);
+
+ return ok ? 0 : 1;
+}
+
+static struct option {
+ const char *name; int *ptr; int min; int isbool;
+} options[] = {
+ { "-v", &cfg_verbose, 0, 1 },
+ { "-h", &cfg_help, 0, 1 },
+ { "-n", &cfg_n_connections, 1, 0 },
+ { "-d", &cfg_duration, 1, 0 },
+ { "-c", &cfg_connlimit, 0, 0 },
+ { "-g", &cfg_grouplimit, 0, 0 },
+ { "-G", &cfg_group_drain, -100000, 0 },
+ { "-t", &cfg_tick_msec, 10, 0 },
+ { "--min-share", &cfg_min_share, 0, 0 },
+ { "--check-connlimit", &cfg_connlimit_tolerance, 0, 0 },
+ { "--check-grouplimit", &cfg_grouplimit_tolerance, 0, 0 },
+ { "--check-stddev", &cfg_stddev_tolerance, 0, 0 },
+#ifdef _WIN32
+ { "--iocp", &cfg_enable_iocp, 0, 1 },
+#endif
+ { NULL, NULL, -1, 0 },
+};
+
+static int
+handle_option(int argc, char **argv, int *i, const struct option *opt)
+{
+ long val;
+ char *endptr = NULL;
+ if (opt->isbool) {
+ *opt->ptr = 1;
+ return 0;
+ }
+ if (*i + 1 == argc) {
+ fprintf(stderr, "Too few arguments to '%s'\n",argv[*i]);
+ return -1;
+ }
+ val = strtol(argv[*i+1], &endptr, 10);
+ if (*argv[*i+1] == '\0' || !endptr || *endptr != '\0') {
+ fprintf(stderr, "Couldn't parse numeric value '%s'\n",
+ argv[*i+1]);
+ return -1;
+ }
+ if (val < opt->min || val > 0x7fffffff) {
+ fprintf(stderr, "Value '%s' is out-of-range\n",
+ argv[*i+1]);
+ return -1;
+ }
+ *opt->ptr = (int)val;
+ ++*i;
+ return 0;
+}
+
+static void
+usage(void)
+{
+ fprintf(stderr,
+"test-ratelim [-v] [-n INT] [-d INT] [-c INT] [-g INT] [-t INT]\n\n"
+"Pushes bytes through a number of possibly rate-limited connections, and\n"
+"displays average throughput.\n\n"
+" -n INT: Number of connections to open (default: 30)\n"
+" -d INT: Duration of the test in seconds (default: 5 sec)\n");
+ fprintf(stderr,
+" -c INT: Connection-rate limit applied to each connection in bytes per second\n"
+" (default: None.)\n"
+" -g INT: Group-rate limit applied to sum of all usage in bytes per second\n"
+" (default: None.)\n"
+" -G INT: drain INT bytes from the group limit every tick. (default: 0)\n"
+" -t INT: Granularity of timing, in milliseconds (default: 1000 msec)\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ int i,j;
+ double ratio;
+
+#ifdef _WIN32
+ WORD wVersionRequested = MAKEWORD(2,2);
+ WSADATA wsaData;
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ evutil_weakrand_seed_(&weakrand_state, 0);
+
+#ifndef _WIN32
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return 1;
+#endif
+ for (i = 1; i < argc; ++i) {
+ for (j = 0; options[j].name; ++j) {
+ if (!strcmp(argv[i],options[j].name)) {
+ if (handle_option(argc,argv,&i,&options[j])<0)
+ return 1;
+ goto again;
+ }
+ }
+ fprintf(stderr, "Unknown option '%s'\n", argv[i]);
+ usage();
+ return 1;
+ again:
+ ;
+ }
+ if (cfg_help) {
+ usage();
+ return 0;
+ }
+
+ cfg_tick.tv_sec = cfg_tick_msec / 1000;
+ cfg_tick.tv_usec = (cfg_tick_msec % 1000)*1000;
+
+ seconds_per_tick = ratio = cfg_tick_msec / 1000.0;
+
+ cfg_connlimit *= ratio;
+ cfg_grouplimit *= ratio;
+
+ {
+ struct timeval tv;
+ evutil_gettimeofday(&tv, NULL);
+#ifdef _WIN32
+ srand(tv.tv_usec);
+#else
+ srandom(tv.tv_usec);
+#endif
+ }
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ evthread_enable_lock_debugging();
+#endif
+
+ return test_ratelimiting();
+}
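+
+/*
+ * Editor's note: a minimal sketch, not part of the original test above,
+ * distilling the per-connection rate-limit setup that test-ratelim.c
+ * exercises.  The helper name, the 500 msec tick and the 4x burst are
+ * illustrative choices, not values required by the API.
+ */
+#if 0
+static void
+apply_connection_limit(struct bufferevent *bev, int bytes_per_tick)
+{
+	/* Refill 'bytes_per_tick' tokens every 500 msec, and allow bursts of
+	 * up to four ticks' worth of data in either direction. */
+	static struct timeval tick = { 0, 500*1000 };
+	struct ev_token_bucket_cfg *cfg = ev_token_bucket_cfg_new(
+	    bytes_per_tick, bytes_per_tick * 4,	/* read rate, read burst */
+	    bytes_per_tick, bytes_per_tick * 4,	/* write rate, write burst */
+	    &tick);
+	if (cfg)
+		bufferevent_set_rate_limit(bev, cfg);
+	/* The cfg object must stay alive while any bufferevent uses it. */
+}
+#endif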
diff --git a/libs/libevent/docs/test/test-ratelim.sh b/libs/libevent/docs/test/test-ratelim.sh
new file mode 100644
index 0000000000..b5e0ca62a9
--- /dev/null
+++ b/libs/libevent/docs/test/test-ratelim.sh
@@ -0,0 +1,88 @@
+#!/bin/sh
+
+FAILED=no
+
+if test "x$TEST_OUTPUT_FILE" = "x"
+then
+ TEST_OUTPUT_FILE=/dev/null
+fi
+
+# /bin/echo is a little more likely to support -n than sh's builtin echo.
+if test -x /bin/echo
+then
+ ECHO=/bin/echo
+else
+ ECHO=echo
+fi
+
+if test "$TEST_OUTPUT_FILE" != "/dev/null"
+then
+ touch "$TEST_OUTPUT_FILE" || exit 1
+fi
+
+TEST_DIR=.
+
+T=`echo "$0" | sed -e 's/test-ratelim.sh$//'`
+if test -x "$T/test-ratelim"
+then
+ TEST_DIR="$T"
+fi
+
+announce () {
+ echo $@
+ echo $@ >>"$TEST_OUTPUT_FILE"
+}
+
+announce_n () {
+ $ECHO -n $@
+ echo $@ >>"$TEST_OUTPUT_FILE"
+}
+
+
+run_tests () {
+ announce_n " Group limits, no connection limit:"
+ if $TEST_DIR/test-ratelim -g 30000 -n 30 -t 100 --check-grouplimit 1000 --check-stddev 100 >>"$TEST_OUTPUT_FILE"
+ then
+ announce OKAY
+ else
+ announce FAILED
+ FAILED=yes
+ fi
+
+ announce_n " Connection limit, no group limit:"
+ if $TEST_DIR/test-ratelim -c 1000 -n 30 -t 100 --check-connlimit 50 --check-stddev 50 >>"$TEST_OUTPUT_FILE"
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+
+ announce_n " Connection limit and group limit:"
+ if $TEST_DIR/test-ratelim -c 1000 -g 30000 -n 30 -t 100 --check-grouplimit 1000 --check-connlimit 50 --check-stddev 50 >>"$TEST_OUTPUT_FILE"
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+
+ announce_n " Connection limit and group limit with independent drain:"
+ if $TEST_DIR/test-ratelim -c 1000 -g 35000 -n 30 -t 100 -G 500 --check-grouplimit 1000 --check-connlimit 50 --check-stddev 50 >>"$TEST_OUTPUT_FILE"
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+
+
+}
+
+announce "Running rate-limiting tests:"
+
+run_tests
+
+if test "$FAILED" = "yes"; then
+ exit 1
+fi
diff --git a/libs/libevent/docs/test/test-time.c b/libs/libevent/docs/test/test-time.c
new file mode 100644
index 0000000000..bcc7086df7
--- /dev/null
+++ b/libs/libevent/docs/test/test-time.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/event_struct.h"
+#include "util-internal.h"
+
+int called = 0;
+
+#define NEVENT 20000
+
+struct event *ev[NEVENT];
+
+struct evutil_weakrand_state weakrand_state;
+
+static int
+rand_int(int n)
+{
+ return evutil_weakrand_(&weakrand_state) % n;
+}
+
+static void
+time_cb(evutil_socket_t fd, short event, void *arg)
+{
+ struct timeval tv;
+ int i, j;
+
+ called++;
+
+ if (called < 10*NEVENT) {
+ for (i = 0; i < 10; i++) {
+ j = rand_int(NEVENT);
+ tv.tv_sec = 0;
+ tv.tv_usec = rand_int(50000);
+ if (tv.tv_usec % 2 || called < NEVENT)
+ evtimer_add(ev[j], &tv);
+ else
+ evtimer_del(ev[j]);
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct timeval tv;
+ int i;
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+ evutil_weakrand_seed_(&weakrand_state, 0);
+
+ /* Initialize the event library */
+ event_init();
+
+ for (i = 0; i < NEVENT; i++) {
+ ev[i] = malloc(sizeof(struct event));
+
+ /* Initialize one event */
+ evtimer_set(ev[i], time_cb, ev[i]);
+ tv.tv_sec = 0;
+ tv.tv_usec = rand_int(50000);
+ evtimer_add(ev[i], &tv);
+ }
+
+ event_dispatch();
+
+
+ printf("%d, %d\n", called, NEVENT);
+ return (called < NEVENT);
+}
+
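+/*
+ * Editor's note: a brief sketch, not part of the original test above, of
+ * the same one-shot timer written against the non-deprecated event2
+ * interface that the other tests in this diff use.  The function names are
+ * illustrative.
+ */
+#if 0
+#include "event2/event.h"
+
+static void
+one_timer_cb(evutil_socket_t fd, short events, void *arg)
+{
+	(void)fd; (void)events; (void)arg;
+}
+
+static int
+run_one_timer(void)
+{
+	struct event_base *base = event_base_new();
+	struct event *timer;
+	struct timeval tv = { 0, 50000 };
+
+	if (!base)
+		return 1;
+	timer = evtimer_new(base, one_timer_cb, NULL); /* event_new(base, -1, 0, ...) */
+	evtimer_add(timer, &tv);
+	event_base_dispatch(base);	/* returns once no events remain pending */
+	event_free(timer);
+	event_base_free(base);
+	return 0;
+}
+#endif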
diff --git a/libs/libevent/docs/test/test-weof.c b/libs/libevent/docs/test/test-weof.c
new file mode 100644
index 0000000000..c379f287cb
--- /dev/null
+++ b/libs/libevent/docs/test/test-weof.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#else
+#include <unistd.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <errno.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/event_compat.h"
+#include "event2/util.h"
+
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+evutil_socket_t pair[2];
+int test_okay = 1;
+int called = 0;
+
+static void
+write_cb(evutil_socket_t fd, short event, void *arg)
+{
+ const char *test = "test string";
+ int len;
+
+ len = send(fd, test, (int)strlen(test) + 1, 0);
+
+ printf("%s: write %d%s\n", __func__,
+ len, len ? "" : " - means EOF");
+
+ if (len > 0) {
+ if (!called)
+ event_add(arg, NULL);
+ evutil_closesocket(pair[0]);
+ } else if (called == 1)
+ test_okay = 0;
+
+ called++;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct event ev;
+
+#ifdef _WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+
+ wVersionRequested = MAKEWORD(2, 2);
+
+ (void) WSAStartup(wVersionRequested, &wsaData);
+#endif
+
+#ifndef _WIN32
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return (1);
+#endif
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+ /* Initialize the event library */
+ event_init();
+
+ /* Initialize one event */
+ event_set(&ev, pair[1], EV_WRITE, write_cb, &ev);
+
+ event_add(&ev, NULL);
+
+ event_dispatch();
+
+ return (test_okay);
+}
+
diff --git a/libs/libevent/docs/test/test.sh b/libs/libevent/docs/test/test.sh
new file mode 100644
index 0000000000..b73c1adced
--- /dev/null
+++ b/libs/libevent/docs/test/test.sh
@@ -0,0 +1,160 @@
+#!/bin/sh
+
+BACKENDS="EVPORT KQUEUE EPOLL DEVPOLL POLL SELECT WIN32"
+TESTS="test-eof test-closed test-weof test-time test-changelist test-fdleak"
+FAILED=no
+TEST_OUTPUT_FILE=${TEST_OUTPUT_FILE:-/dev/null}
+REGRESS_ARGS=${REGRESS_ARGS:-}
+
+# /bin/echo is a little more likely to support -n than sh's builtin echo,
+# printf is even more likely
+if test "`printf %s hello 2>&1`" = "hello"
+then
+ ECHO_N="printf %s"
+else
+ if test -x /bin/echo
+ then
+ ECHO_N="/bin/echo -n"
+ else
+ ECHO_N="echo -n"
+ fi
+fi
+
+if test "$TEST_OUTPUT_FILE" != "/dev/null"
+then
+ touch "$TEST_OUTPUT_FILE" || exit 1
+fi
+
+TEST_DIR=.
+TEST_SRC_DIR=.
+
+T=`echo "$0" | sed -e 's/test.sh$//' | sed -e 's/test-script.sh//' `
+if test -x "$T/test-init"
+then
+ TEST_DIR="$T"
+elif test -x "./test/test-init"
+then
+ TEST_DIR="./test"
+fi
+if test -f "$T/check-dumpevents.py"
+then
+ TEST_SRC_DIR="$T"
+elif test -f "./test/check-dumpevents.py"
+then
+ TEST_SRC_DIR="./test"
+fi
+
+setup () {
+ for i in $BACKENDS; do
+ eval "EVENT_NO$i=yes; export EVENT_NO$i"
+ done
+ unset EVENT_EPOLL_USE_CHANGELIST
+ unset EVENT_PRECISE_TIMER
+}
+
+announce () {
+ echo "$@"
+ echo "$@" >>"$TEST_OUTPUT_FILE"
+}
+
+announce_n () {
+ $ECHO_N "$@"
+ echo "$@" >>"$TEST_OUTPUT_FILE"
+}
+
+
+run_tests () {
+ if $TEST_DIR/test-init 2>>"$TEST_OUTPUT_FILE" ;
+ then
+ true
+ else
+ announce Skipping test
+ return
+ fi
+ for i in $TESTS; do
+ announce_n " $i: "
+ if $TEST_DIR/$i >>"$TEST_OUTPUT_FILE" ;
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+ done
+ announce_n " test-dumpevents: "
+ if python2 -c 'import sys; assert(sys.version_info >= (2, 4))' 2>/dev/null && test -f $TEST_SRC_DIR/check-dumpevents.py; then
+ if $TEST_DIR/test-dumpevents | python2 $TEST_SRC_DIR/check-dumpevents.py >> "$TEST_OUTPUT_FILE" ;
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ fi
+ else
+ # no python
+ if $TEST_DIR/test-dumpevents >/dev/null; then
+ announce "OKAY (output not checked)" ;
+ else
+ announce "FAILED (output not checked)" ;
+ fi
+ fi
+
+ test -x $TEST_DIR/regress || return
+ announce_n " regress: "
+ if test "$TEST_OUTPUT_FILE" = "/dev/null" ;
+ then
+ $TEST_DIR/regress --quiet $REGRESS_ARGS
+ else
+ $TEST_DIR/regress $REGRESS_ARGS >>"$TEST_OUTPUT_FILE"
+ fi
+ if test "$?" = "0" ;
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+
+ announce_n " regress_debug: "
+ if test "$TEST_OUTPUT_FILE" = "/dev/null" ;
+ then
+ EVENT_DEBUG_MODE=1 $TEST_DIR/regress --quiet $REGRESS_ARGS
+ else
+ EVENT_DEBUG_MODE=1 $TEST_DIR/regress $REGRESS_ARGS >>"$TEST_OUTPUT_FILE"
+ fi
+ if test "$?" = "0" ;
+ then
+ announce OKAY ;
+ else
+ announce FAILED ;
+ FAILED=yes
+ fi
+}
+
+do_test() {
+ setup
+ announce "$1 $2"
+ unset EVENT_NO$1
+ if test "$2" = "(changelist)" ; then
+ EVENT_EPOLL_USE_CHANGELIST=yes; export EVENT_EPOLL_USE_CHANGELIST
+ elif test "$2" = "(timerfd)" ; then
+ EVENT_PRECISE_TIMER=1; export EVENT_PRECISE_TIMER
+ elif test "$2" = "(timerfd+changelist)" ; then
+ EVENT_EPOLL_USE_CHANGELIST=yes; export EVENT_EPOLL_USE_CHANGELIST
+ EVENT_PRECISE_TIMER=1; export EVENT_PRECISE_TIMER
+ fi
+
+ run_tests
+}
+
+announce "Running tests:"
+
+do_test EPOLL "(timerfd)"
+do_test EPOLL "(changelist)"
+do_test EPOLL "(timerfd+changelist)"
+for i in $BACKENDS; do
+ do_test $i
+done
+
+if test "$FAILED" = "yes"; then
+ exit 1
+fi
diff --git a/libs/libevent/docs/test/tinytest.c b/libs/libevent/docs/test/tinytest.c
new file mode 100644
index 0000000000..3a8e331055
--- /dev/null
+++ b/libs/libevent/docs/test/tinytest.c
@@ -0,0 +1,493 @@
+/* tinytest.c -- Copyright 2009-2012 Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef TINYTEST_LOCAL
+#include "tinytest_local.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef NO_FORKING
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#endif
+
+#if defined(__APPLE__) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__)
+#if (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1060 && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070)
+/* Workaround for a stupid bug in OSX 10.6 */
+#define FORK_BREAKS_GCOV
+#include <vproc.h>
+#endif
+#endif
+
+#endif /* !NO_FORKING */
+
+#ifndef __GNUC__
+#define __attribute__(x)
+#endif
+
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+#define LONGEST_TEST_NAME 16384
+
+static int in_tinytest_main = 0; /**< true if we're in tinytest_main().*/
+static int n_ok = 0; /**< Number of tests that have passed */
+static int n_bad = 0; /**< Number of tests that have failed. */
+static int n_skipped = 0; /**< Number of tests that have been skipped. */
+
+static int opt_forked = 0; /**< True iff we're called from inside a win32 fork*/
+static int opt_nofork = 0; /**< Suppress calls to fork() for debugging. */
+static int opt_verbosity = 1; /**< -1==quiet, 0==terse, 1==normal, 2==verbose */
+const char *verbosity_flag = "";
+
+const struct testlist_alias_t *cfg_aliases=NULL;
+
+enum outcome { SKIP=2, OK=1, FAIL=0 };
+static enum outcome cur_test_outcome = 0;
+const char *cur_test_prefix = NULL; /**< prefix of the current test group */
+/** Name of the current test, if we haven't logged it yet. Used for --quiet */
+const char *cur_test_name = NULL;
+
+#ifdef _WIN32
+/* Copy of argv[0] for win32. */
+static char commandname[MAX_PATH+1];
+#endif
+
+static void usage(struct testgroup_t *groups, int list_groups)
+ __attribute__((noreturn));
+static int process_test_option(struct testgroup_t *groups, const char *test);
+
+static enum outcome
+testcase_run_bare_(const struct testcase_t *testcase)
+{
+ void *env = NULL;
+ int outcome;
+ if (testcase->setup) {
+ env = testcase->setup->setup_fn(testcase);
+ if (!env)
+ return FAIL;
+ else if (env == (void*)TT_SKIP)
+ return SKIP;
+ }
+
+ cur_test_outcome = OK;
+ testcase->fn(env);
+ outcome = cur_test_outcome;
+
+ if (testcase->setup) {
+ if (testcase->setup->cleanup_fn(testcase, env) == 0)
+ outcome = FAIL;
+ }
+
+ return outcome;
+}
+
+#define MAGIC_EXITCODE 42
+
+#ifndef NO_FORKING
+
+static enum outcome
+testcase_run_forked_(const struct testgroup_t *group,
+ const struct testcase_t *testcase)
+{
+#ifdef _WIN32
+ /* Fork? On Win32? How primitive! We'll do what the smart kids do:
+ we'll invoke our own exe (whose name we recall from the command
+ line) with a command line that tells it to run just the test we
+ want, and this time without forking.
+
+ (No, threads aren't an option. The whole point of forking is to
+ share no state between tests.)
+ */
+ int ok;
+ char buffer[LONGEST_TEST_NAME+256];
+ STARTUPINFOA si;
+ PROCESS_INFORMATION info;
+ DWORD exitcode;
+
+ if (!in_tinytest_main) {
+ printf("\nERROR. On Windows, testcase_run_forked_ must be"
+ " called from within tinytest_main.\n");
+ abort();
+ }
+ if (opt_verbosity>0)
+ printf("[forking] ");
+
+ snprintf(buffer, sizeof(buffer), "%s --RUNNING-FORKED %s %s%s",
+ commandname, verbosity_flag, group->prefix, testcase->name);
+
+ memset(&si, 0, sizeof(si));
+ memset(&info, 0, sizeof(info));
+ si.cb = sizeof(si);
+
+ ok = CreateProcessA(commandname, buffer, NULL, NULL, 0,
+ 0, NULL, NULL, &si, &info);
+ if (!ok) {
+ printf("CreateProcess failed!\n");
+ return 0;
+ }
+ WaitForSingleObject(info.hProcess, INFINITE);
+ GetExitCodeProcess(info.hProcess, &exitcode);
+ CloseHandle(info.hProcess);
+ CloseHandle(info.hThread);
+ if (exitcode == 0)
+ return OK;
+ else if (exitcode == MAGIC_EXITCODE)
+ return SKIP;
+ else
+ return FAIL;
+#else
+ int outcome_pipe[2];
+ pid_t pid;
+ (void)group;
+
+ if (pipe(outcome_pipe))
+ perror("opening pipe");
+
+ if (opt_verbosity>0)
+ printf("[forking] ");
+ pid = fork();
+#ifdef FORK_BREAKS_GCOV
+ vproc_transaction_begin(0);
+#endif
+ if (!pid) {
+ /* child. */
+ int test_r, write_r;
+ char b[1];
+ close(outcome_pipe[0]);
+ test_r = testcase_run_bare_(testcase);
+ assert(0<=(int)test_r && (int)test_r<=2);
+ b[0] = "NYS"[test_r];
+ write_r = (int)write(outcome_pipe[1], b, 1);
+ if (write_r != 1) {
+ perror("write outcome to pipe");
+ exit(1);
+ }
+ exit(0);
+ return FAIL; /* unreachable */
+ } else {
+ /* parent */
+ int status, r;
+ char b[1];
+ /* Close this now, so that if the other side closes it,
+ * our read fails. */
+ close(outcome_pipe[1]);
+ r = (int)read(outcome_pipe[0], b, 1);
+ if (r == 0) {
+ printf("[Lost connection!] ");
+ return 0;
+ } else if (r != 1) {
+ perror("read outcome from pipe");
+ }
+ waitpid(pid, &status, 0);
+ close(outcome_pipe[0]);
+ return b[0]=='Y' ? OK : (b[0]=='S' ? SKIP : FAIL);
+ }
+#endif
+}
+
+#endif /* !NO_FORKING */
+
+int
+testcase_run_one(const struct testgroup_t *group,
+ const struct testcase_t *testcase)
+{
+ enum outcome outcome;
+
+ if (testcase->flags & (TT_SKIP|TT_OFF_BY_DEFAULT)) {
+ if (opt_verbosity>0)
+ printf("%s%s: %s\n",
+ group->prefix, testcase->name,
+ (testcase->flags & TT_SKIP) ? "SKIPPED" : "DISABLED");
+ ++n_skipped;
+ return SKIP;
+ }
+
+ if (opt_verbosity>0 && !opt_forked) {
+ printf("%s%s: ", group->prefix, testcase->name);
+ } else {
+ if (opt_verbosity==0) printf(".");
+ cur_test_prefix = group->prefix;
+ cur_test_name = testcase->name;
+ }
+
+#ifndef NO_FORKING
+ if ((testcase->flags & TT_FORK) && !(opt_forked||opt_nofork)) {
+ outcome = testcase_run_forked_(group, testcase);
+ } else {
+#else
+ {
+#endif
+ outcome = testcase_run_bare_(testcase);
+ }
+
+ if (outcome == OK) {
+ ++n_ok;
+ if (opt_verbosity>0 && !opt_forked)
+ puts(opt_verbosity==1?"OK":"");
+ } else if (outcome == SKIP) {
+ ++n_skipped;
+ if (opt_verbosity>0 && !opt_forked)
+ puts("SKIPPED");
+ } else {
+ ++n_bad;
+ if (!opt_forked)
+ printf("\n [%s FAILED]\n", testcase->name);
+ }
+
+ if (opt_forked) {
+ exit(outcome==OK ? 0 : (outcome==SKIP?MAGIC_EXITCODE : 1));
+ return 1; /* unreachable */
+ } else {
+ return (int)outcome;
+ }
+}
+
+int
+tinytest_set_flag_(struct testgroup_t *groups, const char *arg, int set, unsigned long flag)
+{
+ int i, j;
+ size_t length = LONGEST_TEST_NAME;
+ char fullname[LONGEST_TEST_NAME];
+ int found=0;
+ if (strstr(arg, ".."))
+ length = strstr(arg,"..")-arg;
+ for (i=0; groups[i].prefix; ++i) {
+ for (j=0; groups[i].cases[j].name; ++j) {
+ struct testcase_t *testcase = &groups[i].cases[j];
+ snprintf(fullname, sizeof(fullname), "%s%s",
+ groups[i].prefix, testcase->name);
+ if (!flag) { /* Hack! */
+ printf(" %s", fullname);
+ if (testcase->flags & TT_OFF_BY_DEFAULT)
+ puts(" (Off by default)");
+ else if (testcase->flags & TT_SKIP)
+ puts(" (DISABLED)");
+ else
+ puts("");
+ }
+ if (!strncmp(fullname, arg, length)) {
+ if (set)
+ testcase->flags |= flag;
+ else
+ testcase->flags &= ~flag;
+ ++found;
+ }
+ }
+ }
+ return found;
+}
+
+static void
+usage(struct testgroup_t *groups, int list_groups)
+{
+ puts("Options are: [--verbose|--quiet|--terse] [--no-fork]");
+ puts(" Specify tests by name, or using a prefix ending with '..'");
+ puts(" To skip a test, prefix its name with a colon.");
+ puts(" To enable a disabled test, prefix its name with a plus.");
+ puts(" Use --list-tests for a list of tests.");
+ if (list_groups) {
+ puts("Known tests are:");
+ tinytest_set_flag_(groups, "..", 1, 0);
+ }
+ exit(0);
+}
+
+static int
+process_test_alias(struct testgroup_t *groups, const char *test)
+{
+ int i, j, n, r;
+ for (i=0; cfg_aliases && cfg_aliases[i].name; ++i) {
+ if (!strcmp(cfg_aliases[i].name, test)) {
+ n = 0;
+ for (j = 0; cfg_aliases[i].tests[j]; ++j) {
+ r = process_test_option(groups, cfg_aliases[i].tests[j]);
+ if (r<0)
+ return -1;
+ n += r;
+ }
+ return n;
+ }
+ }
+ printf("No such test alias as @%s!",test);
+ return -1;
+}
+
+static int
+process_test_option(struct testgroup_t *groups, const char *test)
+{
+ int flag = TT_ENABLED_;
+ int n = 0;
+ if (test[0] == '@') {
+ return process_test_alias(groups, test + 1);
+ } else if (test[0] == ':') {
+ ++test;
+ flag = TT_SKIP;
+ } else if (test[0] == '+') {
+ ++test;
+ ++n;
+ if (!tinytest_set_flag_(groups, test, 0, TT_OFF_BY_DEFAULT)) {
+ printf("No such test as %s!\n", test);
+ return -1;
+ }
+ } else {
+ ++n;
+ }
+ if (!tinytest_set_flag_(groups, test, 1, flag)) {
+ printf("No such test as %s!\n", test);
+ return -1;
+ }
+ return n;
+}
+
+void
+tinytest_set_aliases(const struct testlist_alias_t *aliases)
+{
+ cfg_aliases = aliases;
+}
+
+int
+tinytest_main(int c, const char **v, struct testgroup_t *groups)
+{
+ int i, j, n=0;
+
+#ifdef _WIN32
+ const char *sp = strrchr(v[0], '.');
+ const char *extension = "";
+ if (!sp || stricmp(sp, ".exe"))
+ extension = ".exe"; /* Add an exe so CreateProcess will work */
+ snprintf(commandname, sizeof(commandname), "%s%s", v[0], extension);
+ commandname[MAX_PATH]='\0';
+#endif
+ for (i=1; i<c; ++i) {
+ if (v[i][0] == '-') {
+ if (!strcmp(v[i], "--RUNNING-FORKED")) {
+ opt_forked = 1;
+ } else if (!strcmp(v[i], "--no-fork")) {
+ opt_nofork = 1;
+ } else if (!strcmp(v[i], "--quiet")) {
+ opt_verbosity = -1;
+ verbosity_flag = "--quiet";
+ } else if (!strcmp(v[i], "--verbose")) {
+ opt_verbosity = 2;
+ verbosity_flag = "--verbose";
+ } else if (!strcmp(v[i], "--terse")) {
+ opt_verbosity = 0;
+ verbosity_flag = "--terse";
+ } else if (!strcmp(v[i], "--help")) {
+ usage(groups, 0);
+ } else if (!strcmp(v[i], "--list-tests")) {
+ usage(groups, 1);
+ } else {
+ printf("Unknown option %s. Try --help\n",v[i]);
+ return -1;
+ }
+ } else {
+ int r = process_test_option(groups, v[i]);
+ if (r<0)
+ return -1;
+ n += r;
+ }
+ }
+ if (!n)
+ tinytest_set_flag_(groups, "..", 1, TT_ENABLED_);
+
+#ifdef _IONBF
+ setvbuf(stdout, NULL, _IONBF, 0);
+#endif
+
+ ++in_tinytest_main;
+ for (i=0; groups[i].prefix; ++i)
+ for (j=0; groups[i].cases[j].name; ++j)
+ if (groups[i].cases[j].flags & TT_ENABLED_)
+ testcase_run_one(&groups[i],
+ &groups[i].cases[j]);
+
+ --in_tinytest_main;
+
+ if (opt_verbosity==0)
+ puts("");
+
+ if (n_bad)
+ printf("%d/%d TESTS FAILED. (%d skipped)\n", n_bad,
+ n_bad+n_ok,n_skipped);
+ else if (opt_verbosity >= 1)
+ printf("%d tests ok. (%d skipped)\n", n_ok, n_skipped);
+
+ return (n_bad == 0) ? 0 : 1;
+}
+
+int
+tinytest_get_verbosity_(void)
+{
+ return opt_verbosity;
+}
+
+void
+tinytest_set_test_failed_(void)
+{
+ if (opt_verbosity <= 0 && cur_test_name) {
+ if (opt_verbosity==0) puts("");
+ printf("%s%s: ", cur_test_prefix, cur_test_name);
+ cur_test_name = NULL;
+ }
+ cur_test_outcome = 0;
+}
+
+void
+tinytest_set_test_skipped_(void)
+{
+ if (cur_test_outcome==OK)
+ cur_test_outcome = SKIP;
+}
+
+char *
+tinytest_format_hex_(const void *val_, unsigned long len)
+{
+ const unsigned char *val = val_;
+ char *result, *cp;
+ size_t i;
+
+ if (!val)
+ return strdup("null");
+ if (!(result = malloc(len*2+1)))
+ return strdup("<allocation failure>");
+ cp = result;
+ for (i=0;i<len;++i) {
+ *cp++ = "0123456789ABCDEF"[val[i] >> 4];
+ *cp++ = "0123456789ABCDEF"[val[i] & 0x0f];
+ }
+ *cp = 0;
+ return result;
+}
diff --git a/libs/libevent/docs/test/tinytest.h b/libs/libevent/docs/test/tinytest.h
new file mode 100644
index 0000000000..ed07b26bc0
--- /dev/null
+++ b/libs/libevent/docs/test/tinytest.h
@@ -0,0 +1,100 @@
+/* tinytest.h -- Copyright 2009-2012 Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TINYTEST_H_INCLUDED_
+#define TINYTEST_H_INCLUDED_
+
+/** Flag for a test that needs to run in a subprocess. */
+#define TT_FORK (1<<0)
+/** Runtime flag for a test we've decided to skip. */
+#define TT_SKIP (1<<1)
+/** Internal runtime flag for a test we've decided to run. */
+#define TT_ENABLED_ (1<<2)
+/** Flag for a test that's off by default. */
+#define TT_OFF_BY_DEFAULT (1<<3)
+/** If you add your own flags, make them start at this point. */
+#define TT_FIRST_USER_FLAG (1<<4)
+
+typedef void (*testcase_fn)(void *);
+
+struct testcase_t;
+
+/** Functions to initialize/teardown a structure for a testcase. */
+struct testcase_setup_t {
+ /** Return a new structure for use by a given testcase. */
+ void *(*setup_fn)(const struct testcase_t *);
+ /** Clean/free a structure from setup_fn. Return 1 if ok, 0 on err. */
+ int (*cleanup_fn)(const struct testcase_t *, void *);
+};
+
+/** A single test-case that you can run. */
+struct testcase_t {
+ const char *name; /**< An identifier for this case. */
+ testcase_fn fn; /**< The function to run to implement this case. */
+ unsigned long flags; /**< Bitfield of TT_* flags. */
+ const struct testcase_setup_t *setup; /**< Optional setup/cleanup fns*/
+ void *setup_data; /**< Extra data usable by setup function */
+};
+#define END_OF_TESTCASES { NULL, NULL, 0, NULL, NULL }
+
+/** A group of tests that are selectable together. */
+struct testgroup_t {
+ const char *prefix; /**< Prefix to prepend to testnames. */
+ struct testcase_t *cases; /**< Array, ending with END_OF_TESTCASES */
+};
+#define END_OF_GROUPS { NULL, NULL}
+
+struct testlist_alias_t {
+ const char *name;
+ const char **tests;
+};
+#define END_OF_ALIASES { NULL, NULL }
+
+/** Implementation: called from a test to indicate failure, before logging. */
+void tinytest_set_test_failed_(void);
+/** Implementation: called from a test to indicate that we're skipping. */
+void tinytest_set_test_skipped_(void);
+/** Implementation: return 0 for quiet, 1 for normal, 2 for loud. */
+int tinytest_get_verbosity_(void);
+/** Implementation: Set a flag on tests matching a name; returns number
+ * of tests that matched. */
+int tinytest_set_flag_(struct testgroup_t *, const char *, int set, unsigned long);
+/** Implementation: Put a chunk of memory into hex. */
+char *tinytest_format_hex_(const void *, unsigned long);
+
+/** Set all tests in 'groups' matching the name 'named' to be skipped. */
+#define tinytest_skip(groups, named) \
+ tinytest_set_flag_(groups, named, 1, TT_SKIP)
+
+/** Run a single testcase in a single group. */
+int testcase_run_one(const struct testgroup_t *,const struct testcase_t *);
+
+void tinytest_set_aliases(const struct testlist_alias_t *aliases);
+
+/** Run a set of testcases from an END_OF_GROUPS-terminated array of groups,
+ as selected from the command line. */
+int tinytest_main(int argc, const char **argv, struct testgroup_t *groups);
+
+#endif
diff --git a/libs/libevent/docs/test/tinytest_demo.c b/libs/libevent/docs/test/tinytest_demo.c
new file mode 100644
index 0000000000..f6bfd66a1a
--- /dev/null
+++ b/libs/libevent/docs/test/tinytest_demo.c
@@ -0,0 +1,262 @@
+/* tinytest_demo.c -- Copyright 2009-2012 Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* Welcome to the example file for tinytest! I'll show you how to set up
+ * some simple and not-so-simple testcases. */
+
+/* Make sure you include these headers. */
+#include "tinytest.h"
+#include "tinytest_macros.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+/* ============================================================ */
+
+/* First, let's see if strcmp is working. (All your test cases should be
+ * functions declared to take a single void * as an argument.) */
+void
+test_strcmp(void *data)
+{
+ (void)data; /* This testcase takes no data. */
+
+ /* Let's make sure the empty string is equal to itself */
+ if (strcmp("","")) {
+ /* This macro tells tinytest to stop the current test
+ * and go straight to the "end" label. */
+ tt_abort_msg("The empty string was not equal to itself");
+ }
+
+ /* Pretty often, calling tt_abort_msg to indicate failure is more
+ heavy-weight than you want. Instead, just say: */
+ tt_assert(strcmp("testcase", "testcase") == 0);
+
+ /* Occasionally, you don't want to stop the current testcase just
+ because a single assertion has failed. In that case, use
+ tt_want: */
+ tt_want(strcmp("tinytest", "testcase") > 0);
+
+ /* You can use the tt_*_op family of macros to compare values and to
+ fail unless they have the relationship you want. They produce
+ more useful output than tt_assert, since they display the actual
+ values of the failing things.
+
+ Fail unless strcmp("abc", "abc") == 0 */
+ tt_int_op(strcmp("abc", "abc"), ==, 0);
+
+ /* Fail unless strcmp("abc", "abcd") is less than 0 */
+ tt_int_op(strcmp("abc", "abcd"), < , 0);
+
+ /* Incidentally, there's a tt_str_op that uses strcmp internally. */
+ tt_str_op("abc", <, "abcd");
+
+
+ /* Every test-case function needs to finish with an "end:"
+ label and (optionally) code to clean up local variables. */
+ end:
+ ;
+}
+
+/* ============================================================ */
+
+/* Now let's mess with setup and teardown functions! These are handy if
+ you have a bunch of tests that all need a similar environment, and you
+ want to reconstruct that environment freshly for each one. */
+
+/* First you declare a type to hold the environment info, and functions to
+ set it up and tear it down. */
+struct data_buffer {
+ /* We're just going to have a couple of character buffers. Using
+ setup/teardown functions is probably overkill for this case.
+
+ You could also do file descriptors, complicated handles, temporary
+ files, etc. */
+ char buffer1[512];
+ char buffer2[512];
+};
+/* The setup function needs to take a const struct testcase_t * and return
+ a void *. */
+void *
+setup_data_buffer(const struct testcase_t *testcase)
+{
+ struct data_buffer *db = malloc(sizeof(struct data_buffer));
+
+ /* If you had a complicated set of setup rules, you might behave
+ differently here depending on testcase->flags or
+ testcase->setup_data or even testcase->name. */
+
+ /* If the malloc failed, returning the resulting NULL tells tinytest
+ that setup failed, so we don't need to test db for null here. */
+ return db;
+}
+/* The clean function deallocates storage carefully and returns true on
+ success. */
+int
+clean_data_buffer(const struct testcase_t *testcase, void *ptr)
+{
+ struct data_buffer *db = ptr;
+
+ if (db) {
+ free(db);
+ return 1;
+ }
+ return 0;
+}
+/* Finally, declare a testcase_setup_t with these functions. */
+struct testcase_setup_t data_buffer_setup = {
+ setup_data_buffer, clean_data_buffer
+};
+
+
+/* Now let's write our test. */
+void
+test_memcpy(void *ptr)
+{
+ /* This time, we use the argument. */
+ struct data_buffer *db = ptr;
+
+ /* We'll also introduce a local variable that might need cleaning up. */
+ char *mem = NULL;
+
+ /* Let's make sure that memcpy does what we'd like. */
+ strcpy(db->buffer1, "String 0");
+ memcpy(db->buffer2, db->buffer1, sizeof(db->buffer1));
+ tt_str_op(db->buffer1, ==, db->buffer2);
+
+ /* This one works if there's an internal NUL. */
+ tt_mem_op(db->buffer1, ==, db->buffer2, sizeof(db->buffer1));
+
+ /* Now we've allocated memory that's referenced by a local variable.
+ The end block of the function will clean it up. */
+ mem = strdup("Hello world.");
+ tt_assert(mem);
+
+ /* Another rather trivial test. */
+ tt_str_op(db->buffer1, !=, mem);
+
+ end:
+ /* This time our end block has something to do. */
+ if (mem)
+ free(mem);
+}
+
+void
+test_timeout(void *ptr)
+{
+ time_t t1, t2;
+ (void)ptr;
+ t1 = time(NULL);
+#ifdef _WIN32
+ Sleep(5000);
+#else
+ sleep(5);
+#endif
+ t2 = time(NULL);
+
+ tt_int_op(t2-t1, >=, 4);
+
+ tt_int_op(t2-t1, <=, 6);
+
+ end:
+ ;
+}
+
+/* ============================================================ */
+
+/* Now we need to make sure that our tests get invoked. First, you take
+ a bunch of related tests and put them into an array of struct testcase_t.
+*/
+
+struct testcase_t demo_tests[] = {
+ /* Here's a really simple test: it has a name you can refer to it
+ with, and a function to invoke it. */
+ { "strcmp", test_strcmp, },
+
+ /* The second test has a flag, "TT_FORK", to make it run in a
+ subprocess, and a pointer to the testcase_setup_t that configures
+ its environment. */
+ { "memcpy", test_memcpy, TT_FORK, &data_buffer_setup },
+
+ /* This flag is off-by-default, since it takes a while to run. You
+ * can enable it manually by passing +demo/timeout at the command line.*/
+ { "timeout", test_timeout, TT_OFF_BY_DEFAULT },
+
+ /* The array has to end with END_OF_TESTCASES. */
+ END_OF_TESTCASES
+};
+
+/* Next, we make an array of testgroups. This is mandatory. Unlike more
+ heavy-duty testing frameworks, groups can't nest. */
+struct testgroup_t groups[] = {
+
+ /* Every group has a 'prefix', and an array of tests. That's it. */
+ { "demo/", demo_tests },
+
+ END_OF_GROUPS
+};
+
+/* We can also define test aliases. These can be used for types of tests that
+ * cut across groups. */
+const char *alltests[] = { "+..", NULL };
+const char *slowtests[] = { "+demo/timeout", NULL };
+struct testlist_alias_t aliases[] = {
+
+ { "ALL", alltests },
+ { "SLOW", slowtests },
+
+ END_OF_ALIASES
+};
+
+
+int
+main(int c, const char **v)
+{
+ /* Finally, just call tinytest_main(). It lets you specify verbose
+ or quiet output with --verbose and --quiet. You can list
+ specific tests:
+
+ tinytest-demo demo/memcpy
+
+ or use a ..-wildcard to select multiple tests with a common
+ prefix:
+
+ tinytest-demo demo/..
+
+ If you list no tests, you get them all by default, so that
+ "tinytest-demo" and "tinytest-demo .." mean the same thing.
+
+ */
+ tinytest_set_aliases(aliases);
+ return tinytest_main(c, v, groups);
+}
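+
+/*
+ * Editor's note (not part of the upstream demo): a few illustrative
+ * invocations of the selection syntax that tinytest_main() above accepts.
+ * The binary name "tinytest-demo" follows the comment in main(); adjust it
+ * to however the demo is actually built.
+ *
+ *   ./tinytest-demo --verbose demo/..      run every demo/ test, loudly
+ *   ./tinytest-demo demo/memcpy            run a single named test
+ *   ./tinytest-demo :demo/strcmp demo/..   skip strcmp, run the rest
+ *   ./tinytest-demo +demo/timeout          enable the off-by-default test
+ *   ./tinytest-demo @SLOW                  expand the "SLOW" alias above
+ */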
diff --git a/libs/libevent/docs/test/tinytest_local.h b/libs/libevent/docs/test/tinytest_local.h
new file mode 100644
index 0000000000..87ec2fa67e
--- /dev/null
+++ b/libs/libevent/docs/test/tinytest_local.h
@@ -0,0 +1,12 @@
+
+#include "util-internal.h"
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+
+#ifdef snprintf
+#undef snprintf
+#endif
+#define snprintf evutil_snprintf
diff --git a/libs/libevent/docs/test/tinytest_macros.h b/libs/libevent/docs/test/tinytest_macros.h
new file mode 100644
index 0000000000..c3728d1fdd
--- /dev/null
+++ b/libs/libevent/docs/test/tinytest_macros.h
@@ -0,0 +1,199 @@
+/* tinytest_macros.h -- Copyright 2009-2012 Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TINYTEST_MACROS_H_INCLUDED_
+#define TINYTEST_MACROS_H_INCLUDED_
+
+/* Helpers for defining statement-like macros */
+#define TT_STMT_BEGIN do {
+#define TT_STMT_END } while (0)
+
+/* Redefine this if your test functions want to abort with something besides
+ * "goto end;" */
+#ifndef TT_EXIT_TEST_FUNCTION
+#define TT_EXIT_TEST_FUNCTION TT_STMT_BEGIN goto end; TT_STMT_END
+#endif
+
+/* Redefine this if you want to note success/failure in some different way. */
+#ifndef TT_DECLARE
+#define TT_DECLARE(prefix, args) \
+ TT_STMT_BEGIN \
+ printf("\n %s %s:%d: ",prefix,__FILE__,__LINE__); \
+ printf args ; \
+ TT_STMT_END
+#endif
+
+/* Announce a failure. Args are parenthesized printf args. */
+#define TT_GRIPE(args) TT_DECLARE("FAIL", args)
+
+/* Announce a non-failure if we're verbose. */
+#define TT_BLATHER(args) \
+ TT_STMT_BEGIN \
+ if (tinytest_get_verbosity_()>1) TT_DECLARE(" OK", args); \
+ TT_STMT_END
+
+#define TT_DIE(args) \
+ TT_STMT_BEGIN \
+ tinytest_set_test_failed_(); \
+ TT_GRIPE(args); \
+ TT_EXIT_TEST_FUNCTION; \
+ TT_STMT_END
+
+#define TT_FAIL(args) \
+ TT_STMT_BEGIN \
+ tinytest_set_test_failed_(); \
+ TT_GRIPE(args); \
+ TT_STMT_END
+
+/* Fail and abort the current test for the reason in msg */
+#define tt_abort_printf(msg) TT_DIE(msg)
+#define tt_abort_perror(op) TT_DIE(("%s: %s [%d]",(op),strerror(errno), errno))
+#define tt_abort_msg(msg) TT_DIE(("%s", msg))
+#define tt_abort() TT_DIE(("%s", "(Failed.)"))
+
+/* Fail but do not abort the current test for the reason in msg. */
+#define tt_fail_printf(msg) TT_FAIL(msg)
+#define tt_fail_perror(op) TT_FAIL(("%s: %s [%d]",(op),strerror(errno), errno))
+#define tt_fail_msg(msg) TT_FAIL(("%s", msg))
+#define tt_fail() TT_FAIL(("%s", "(Failed.)"))
+
+/* End the current test, and indicate we are skipping it. */
+#define tt_skip() \
+ TT_STMT_BEGIN \
+ tinytest_set_test_skipped_(); \
+ TT_EXIT_TEST_FUNCTION; \
+ TT_STMT_END
+
+#define tt_want_(b, msg, fail) \
+ TT_STMT_BEGIN \
+ if (!(b)) { \
+ tinytest_set_test_failed_(); \
+ TT_GRIPE(("%s",msg)); \
+ fail; \
+ } else { \
+ TT_BLATHER(("%s",msg)); \
+ } \
+ TT_STMT_END
+
+/* Assert b, but do not stop the test if b fails. Log msg on failure. */
+#define tt_want_msg(b, msg) \
+ tt_want_(b, msg, );
+
+/* Assert b and stop the test if b fails. Log msg on failure. */
+#define tt_assert_msg(b, msg) \
+ tt_want_(b, msg, TT_EXIT_TEST_FUNCTION);
+
+/* Assert b, but do not stop the test if b fails. */
+#define tt_want(b) tt_want_msg( (b), "want("#b")")
+/* Assert b, and stop the test if b fails. */
+#define tt_assert(b) tt_assert_msg((b), "assert("#b")")
+
+#define tt_assert_test_fmt_type(a,b,str_test,type,test,printf_type,printf_fmt, \
+ setup_block,cleanup_block,die_on_fail) \
+ TT_STMT_BEGIN \
+ type val1_ = (a); \
+ type val2_ = (b); \
+ int tt_status_ = (test); \
+ if (!tt_status_ || tinytest_get_verbosity_()>1) { \
+ printf_type print_; \
+ printf_type print1_; \
+ printf_type print2_; \
+ type value_ = val1_; \
+ setup_block; \
+ print1_ = print_; \
+ value_ = val2_; \
+ setup_block; \
+ print2_ = print_; \
+ TT_DECLARE(tt_status_?" OK":"FAIL", \
+ ("assert(%s): "printf_fmt" vs "printf_fmt, \
+ str_test, print1_, print2_)); \
+ print_ = print1_; \
+ cleanup_block; \
+ print_ = print2_; \
+ cleanup_block; \
+ if (!tt_status_) { \
+ tinytest_set_test_failed_(); \
+ die_on_fail ; \
+ } \
+ } \
+ TT_STMT_END
+
+#define tt_assert_test_type(a,b,str_test,type,test,fmt,die_on_fail) \
+ tt_assert_test_fmt_type(a,b,str_test,type,test,type,fmt, \
+ {print_=value_;},{},die_on_fail)
+
+#define tt_assert_test_type_opt(a,b,str_test,type,test,fmt,die_on_fail) \
+ tt_assert_test_fmt_type(a,b,str_test,type,test,type,fmt, \
+ {print_=value_?value_:"<NULL>";},{},die_on_fail)
+
+/* Helper: assert that a op b, when cast to type. Format the values with
+ * printf format fmt on failure. */
+#define tt_assert_op_type(a,op,b,type,fmt) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,type,(val1_ op val2_),fmt, \
+ TT_EXIT_TEST_FUNCTION)
+
+#define tt_int_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,long,(val1_ op val2_), \
+ "%ld",TT_EXIT_TEST_FUNCTION)
+
+#define tt_uint_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,unsigned long, \
+ (val1_ op val2_),"%lu",TT_EXIT_TEST_FUNCTION)
+
+#define tt_ptr_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,const void*, \
+ (val1_ op val2_),"%p",TT_EXIT_TEST_FUNCTION)
+
+#define tt_str_op(a,op,b) \
+ tt_assert_test_type_opt(a,b,#a" "#op" "#b,const char *, \
+ (val1_ && val2_ && strcmp(val1_,val2_) op 0),"<%s>", \
+ TT_EXIT_TEST_FUNCTION)
+
+#define tt_mem_op(expr1, op, expr2, len) \
+ tt_assert_test_fmt_type(expr1,expr2,#expr1" "#op" "#expr2, \
+ const void *, \
+ (val1_ && val2_ && memcmp(val1_, val2_, len) op 0), \
+ char *, "%s", \
+ { print_ = tinytest_format_hex_(value_, (len)); }, \
+ { if (print_) free(print_); }, \
+ TT_EXIT_TEST_FUNCTION \
+ );
+
+#define tt_want_int_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,long,(val1_ op val2_),"%ld",(void)0)
+
+#define tt_want_uint_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,unsigned long, \
+ (val1_ op val2_),"%lu",(void)0)
+
+#define tt_want_ptr_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,const void*, \
+ (val1_ op val2_),"%p",(void)0)
+
+#define tt_want_str_op(a,op,b) \
+ tt_assert_test_type(a,b,#a" "#op" "#b,const char *, \
+ (strcmp(val1_,val2_) op 0),"<%s>",(void)0)
+
+#endif
diff --git a/libs/libevent/docs/whatsnew-2.0.txt b/libs/libevent/docs/whatsnew-2.0.txt
new file mode 100644
index 0000000000..3561fcb94c
--- /dev/null
+++ b/libs/libevent/docs/whatsnew-2.0.txt
@@ -0,0 +1,609 @@
+What's New In Libevent 2.0 so far:
+
+1. Meta-issues
+
+1.1. About this document
+
+ This document describes the key differences between Libevent 1.4 and
+ Libevent 2.0, from a user's point of view. It was most recently
+ updated based on features in git master as of August 2010.
+
+   NOTE: I am very sure that I missed some things on this list. Caveat
+ haxxor.
+
+1.2. Better documentation
+
+ There is now a book-in-progress that explains how to use Libevent and its
+ growing pile of APIs. As of this writing, it covers everything except the
+ http and rpc code. Check out the latest draft at
+ http://www.wangafu.net/~nickm/libevent-book/ .
+
+2. New and Improved Event APIs
+
+ Many APIs are improved, refactored, or deprecated in Libevent 2.0.
+
+ COMPATIBILITY:
+
+ Nearly all existing code that worked with Libevent 1.4 should still
+ work correctly with Libevent 2.0. However, if you are writing new code,
+ or if you want to port old code, we strongly recommend using the new APIs
+ and avoiding deprecated APIs as much as possible.
+
+ Binaries linked against Libevent 1.4 will need to be recompiled to link
+ against Libevent 2.0. This is nothing new; we have never been good at
+ preserving binary compatibility between releases. We'll try harder in the
+ future, though: see 2.1 below.
+
+2.1. New header layout for improved forward-compatibility
+
+ Libevent 2.0 has a new header layout to make it easier for programmers to
+ write good, well-supported libevent code. The new headers are divided
+ into three types.
+
+ There are *regular headers*, like event2/event.h. These headers contain
+ the functions that most programmers will want to use.
+
+ There are *backward compatibility headers*, like event2/event_compat.h.
+ These headers contain declarations for deprecated functions from older
+ versions of Libevent. Documentation in these headers should suggest what's
+ wrong with the old functions, and what functions you want to start using
+ instead of the old ones. Some of these functions might be removed in a
+ future release. New programs should generally not include these headers.
+
+ Finally, there are *structure headers*, like event2/event_struct.h.
+ These headers contain definitions of some structures that Libevent has
+ historically exposed. Exposing them caused problems in the past,
+ since programs that were compiled to work with one version of Libevent
+ would often stop working with another version that changed the size or
+   layout of some object. We've moved them into separate headers so
+   that programmers can know that their code is not depending on any
+   unstable aspect of the Libevent ABI. New programs should generally not
+ include these headers unless they really know what they are doing, are
+ willing to rebuild their software whenever they want to link it
+ against a new version of Libevent, and are willing to risk their code
+ breaking if and when data structures change.
+
+ Functionality that once was located in event.h is now more subdivided.
+ The core event logic is now in event2/event.h. The "evbuffer" functions
+ for low-level buffer manipulation are in event2/buffer.h. The
+ "bufferevent" functions for higher-level buffered IO are in
+ event2/bufferevent.h.
+
+ COMPATIBILITY:
+
+ All of the old headers (event.h, evdns.h, evhttp.h, evrpc.h, and
+ evutil.h) will continue to work by including the corresponding new
+ headers. Old code should not be broken by this change.
+
+2.2. New thread-safe, binary-compatible, harder-to-mess-up APIs
+
+ Some aspects of the historical Libevent API have encouraged
+ non-threadsafe code, or forced code built against one version of Libevent
+ to no longer build with another. The problems with now-deprecated APIs
+ fell into two categories:
+
+ 1) Dependence on the "current" event_base. In an application with
+ multiple event_bases, Libevent previously had a notion of the
+ "current" event_base. New events were linked to this base, and
+ the caller needed to explicitly reattach them to another base.
+ This was horribly error-prone.
+
+ Functions like "event_set" that worked with the "current" event_base
+ are now deprecated but still available (see 2.1). There are new
+ functions like "event_assign" that take an explicit event_base
+ argument when setting up a structure. Using these functions will help
+   prevent errors in your applications and make your code more threadsafe.
+
+ 2) Structure dependence. Applications needed to allocate 'struct
+ event' themselves, since there was no function in Libevent to do it
+ for them. But since the size and contents of struct event can
+ change between libevent versions, this created binary-compatibility
+ nightmares. All structures of this kind are now isolated in
+ _struct.h header (see 2.1), and there are new allocate-and-
+ initialize functions you can use instead of the old initialize-only
+ functions. For example, instead of malloc and event_set, you
+ can use event_new().
+
+ (For people who do really want to allocate a struct event on the
+ stack, or put one inside another structure, you can still use
+ event2/event_compat.h.)
+
+ So in the case where old code would look like this:
+
+ #include <event.h>
+ ...
+ struct event *ev = malloc(sizeof(struct event));
+ /* This call will cause a buffer overrun if you compile with one version
+ of Libevent and link dynamically against another. */
+ event_set(ev, fd, EV_READ, cb, NULL);
+ /* If you forget this call, your code will break in hard-to-diagnose
+ ways in the presence of multiple event bases. */
+ event_set_base(ev, base);
+
+ New code will look more like this:
+
+ #include <event2/event.h>
+ ...
+ struct event *ev;
+ ev = event_new(base, fd, EV_READ, cb, NULL);
+
+2.3. Overrideable allocation functions
+
+ If you want to override the allocation functions used by libevent
+ (for example, to use a specialized allocator, or debug memory
+ issues, or so on), you can replace them by calling
+ event_set_mem_functions. It takes replacements for malloc(),
+ free(), and realloc().
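+
+   For example, a program that wants to route Libevent's allocations through
+   its own wrappers might do something like this (a minimal sketch; the
+   counting_* wrappers are hypothetical, not part of Libevent):
+
+       #include <event2/event.h>
+       #include <stdlib.h>
+
+       static void *counting_malloc(size_t sz) { return malloc(sz); }
+       static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
+       static void counting_free(void *p) { free(p); }
+
+       /* Must run before Libevent allocates anything. */
+       event_set_mem_functions(counting_malloc, counting_realloc, counting_free);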
+
+ If you're going to use this facility, you need to call it _before_
+ Libevent does any memory allocation; otherwise, Libevent may allocate some
+ memory with malloc(), and free it with the free() function you provide.
+
+ You can disable this feature when you are building Libevent by passing
+ the --disable-malloc-replacement argument to configure.
+
+2.4. Configurable event_base creation
+
+   Older versions of Libevent would always pick the fastest backend
+ available, unless you reconfigured their behavior with the environment
+ variables EVENT_NOSELECT, EVENT_NOPOLL, and so forth. This was annoying
+ to programmers who wanted to pick a backend explicitly without messing
+ with the environment.
+
+ Also, despite our best efforts, not every backend supports every
+ operation we might like. Some features (like edge-triggered events, or
+ working with non-socket file descriptors) only work with some operating
+ systems' fast backends. Previously, programmers who cared about this
+ needed to know which backends supported what. This tended to get quite
+ ungainly.
+
+ There is now an API to choose backends, either by name or by feature.
+ Here is an example:
+
+       struct event_config *config;
+ struct event_base *base;
+
+ /* Create a new configuration object. */
+ config = event_config_new();
+ /* We don't want to use the "select" method. */
+ event_config_avoid_method(config, "select");
+ /* We want a method that can work with non-socket file descriptors */
+ event_config_require_features(config, EV_FEATURE_FDS);
+
+ base = event_base_new_with_config(config);
+ if (!base) {
+ /* There is no backend method that does what we want. */
+ exit(1);
+ }
+ event_config_free(config);
+
+ Supported features are documented in event2/event.h
+
+2.5. Socket is now an abstract type
+
+ All APIs that formerly accepted int as a socket type now accept
+ "evutil_socket_t". On Unix, this is just an alias for "int" as
+ before. On Windows, however, it's an alias for SOCKET, which can
+ be wider than int on 64-bit platforms.
+
+2.6. Timeouts and persistent events work together.
+
+ Previously, it wasn't useful to set a timeout on a persistent event:
+ the timeout would trigger once, and never again. This is not what
+ applications tend to want. Instead, applications tend to want every
+ triggering of the event to re-set the timeout. So now, if you set
+ up an event like this:
+ struct event *ev;
+ struct timeval tv;
+ ev = event_new(base, fd, EV_READ|EV_PERSIST, cb, NULL);
+ tv.tv_sec = 1;
+ tv.tv_usec = 0;
+ event_add(ev, &tv);
+
+ The callback 'cb' will be invoked whenever fd is ready to read, OR whenever
+ a second has passed since the last invocation of cb.
+
+2.7. Multiple events allowed per fd
+
+ Older versions of Libevent allowed at most one EV_READ event and at most
+ one EV_WRITE event per socket, per event base. This restriction is no
+ longer present.
+
+2.8. evthread_* functions for thread-safe structures.
+
+ Libevent structures can now be built with locking support. This code
+ makes it safe to add, remove, and activate events on an event base from a
+ different thread. (Previously, if you wanted to write multithreaded code
+   with Libevent, you could only use an event_base and its events in one
+   thread at a time.)
+
+ If you want threading support and you're using pthreads, you can just
+ call evthread_use_pthreads(). (You'll need to link against the
+ libevent_pthreads library in addition to libevent_core. These functions are
+ not in libevent_core.)
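+
+   For instance, a pthreads-based program would typically do this once at
+   startup, before creating any event_base (a minimal sketch):
+
+       #include <event2/thread.h>
+       #include <event2/event.h>
+
+       if (evthread_use_pthreads() < 0) {
+           /* Threading support is unavailable; handle the error. */
+       }
+       /* Bases created after this point are built with locks. */
+       struct event_base *base = event_base_new();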
+
+ If you want threading support and you're using Windows, you can just
+ call evthread_use_windows_threads().
+
+   If you are using some locking system besides Windows and pthreads, you
+ can enable this on a per-event-base level by writing functions to
+ implement mutexes, conditions, and thread IDs, and passing them to
+ evthread_set_lock_callbacks and related functions in event2/thread.h.
+
+ Once locking functions are enabled, every new event_base is created with a
+ lock. You can prevent a single event_base from being built with a lock
+   by using the EVENT_BASE_FLAG_NOLOCK flag in its
+ event_config. If an event_base is created with a lock, it is safe to call
+ event_del, event_add, and event_active on its events from any thread. The
+ event callbacks themselves are still all executed from the thread running
+ the event loop.
+
+ To make an evbuffer or a bufferevent object threadsafe, call its
+ *_enable_locking() function.
+
+   The HTTP API is not currently threadsafe.
+
+ To build Libevent with threading support disabled, pass
+ --disable-thread-support to the configure script.
+
+2.9. Edge-triggered events on some backends.
+
+ With some backends, it's now possible to add the EV_ET flag to an event
+ in order to request that the event's semantics be edge-triggered. Right
+ now, epoll and kqueue support this.
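+
+   A request for edge-triggered behavior might look like this (sketch;
+   'base', 'fd', and 'cb' are assumed to be set up elsewhere):
+
+       struct event *ev;
+       ev = event_new(base, fd, EV_READ|EV_ET, cb, NULL);
+       event_add(ev, NULL);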
+
+ The corresponding event_config feature is EV_FEATURE_ET; see 2.4 for more
+ information.
+
+2.10. Better support for huge numbers of timeouts
+
+ The heap-based priority queue timer implementation for Libevent 1.4 is good
+ for randomly distributed timeouts, but suboptimal if you have huge numbers
+ of timeouts that all expire in the same amount of time after their
+ creation. The new event_base_init_common_timeout() logic lets you signal
+ that a given timeout interval will be very common, and should use a linked
+ list implementation instead of a priority queue.
+
+2.11. Improved debugging support
+
+ It's been pretty easy to forget to delete all your events before you
+ re-initialize them, or otherwise put Libevent in an internally inconsistent
+ state. You can tell libevent to catch these and other common errors with
+ the new event_enable_debug_mode() call. Just invoke it before you do
+ any calls to other libevent functions, and it'll catch many common
+ event-level errors in your code.
+
+2.12. Functions to access all event fields
+
+ So that you don't have to access the struct event fields directly, Libevent
+ now provides accessor functions to retrieve everything from an event that
+ you set during event_new() or event_assign().
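+
+   For example, the following accessors (declared in event2/event.h) recover
+   the values given at construction time (sketch; 'ev' is an existing event):
+
+       evutil_socket_t fd = event_get_fd(ev);
+       short what = event_get_events(ev);       /* EV_READ, EV_WRITE, ... */
+       event_callback_fn cb = event_get_callback(ev);
+       void *arg = event_get_callback_arg(ev);
+       struct event_base *base = event_get_base(ev);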
+
+3. Backend-specific and performance improvements.
+
+3.1. Change-minimization on O(1) backends
+
+ With previous versions of Libevent, if you called event_del() and
+ event_add() repeatedly on a single event between trips to the backend's
+ dispatch function, the backend might wind up making unnecessary calls or
+ passing unnecessary data to the kernel. The new backend logic batches up
+ redundant adds and deletes, and performs no more operations than necessary
+ at the kernel level.
+
+ This logic is on for the kqueue backend, and available (but off by
+ default) for the epoll backend. To turn it on for the epoll backend,
+ set the EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST flag in the
+   event_base_config, or set the EVENT_EPOLL_USE_CHANGELIST environment
+   variable. Doing this with epoll may result in weird bugs if you give
+   it any fds cloned by dup() or its variants.
+
+3.2. Improved notification on Linux
+
+ When we need to wake the event loop up from another thread, we use
+   an eventfd to do so, instead of a socketpair. This is supposed to be
+ faster.
+
+3.3. Windows: better support for everything
+
+ Bufferevents on Windows can use a new mechanism (off-by-default; see below)
+ to send their data via Windows overlapped IO and get their notifications
+ via the IOCP API. This should be much faster than using event-based
+ notification.
+
+ Other functions throughout the code have been fixed to work more
+ consistently with Windows. Libevent now builds on Windows using either
+ mingw, or using MSVC (with nmake). Libevent works fine with UNICODE
+ defined, or not.
+
+ Data structures are a little smarter: our lookups from socket to pending
+ event are now done with O(1) hash tables rather than O(lg n) red-black
+ trees.
+
+ Unfortunately, the main Windows backend is still select()-based: from
+ testing the IOCP backends on the mailing list, it seems that there isn't
+ actually a way to tell for certain whether a socket is writable with IOCP.
+ Libevent 2.1 may add a multithreaded WaitForMultipleEvents-based
+ backend for better performance with many inactive sockets and better
+ integration with Windows events.
+
+4. Improvements to evbuffers
+
+ Libevent has long had an "evbuffer" implementation to wrap access to an
+ input or output memory buffer. In previous versions, the implementation
+ was very inefficient and lacked some desirable features. We've made many
+ improvements in Libevent 2.0.
+
+4.1. Chunked-memory internal representation
+
+ Previously, each evbuffer was a huge chunk of memory. When we ran out of
+ space in an evbuffer, we used realloc() to grow the chunk of memory. When
+ data was misaligned, we used memmove to move the data back to the front
+ of the buffer.
+
+ Needless to say, this is a terrible interface for networked IO.
+
+ Now, evbuffers are implemented as a linked list of memory chunks, like
+ most Unix kernels use for network IO. (See Linux's skbuf interfaces,
+ or *BSD's mbufs). Data is added at the end of the linked list and
+   removed from the front, so that we never need to realloc huge chunks
+ or memmove the whole buffer contents.
+
+ To avoid excessive calls to read and write, we use the readv/writev
+ interfaces (or WSASend/WSARecv on Windows) to do IO on multiple chunks at
+ once with a single system call.
+
+ COMPATIBILITY NOTE:
+ The evbuffer struct is no longer exposed in a header. The code here is
+ too volatile to expose an official evbuffer structure, and there was never
+ any means provided to create an evbuffer except via evbuffer_new which
+ heap-allocated the buffer.
+
+ If you need access to the whole buffer as a linear chunk of memory, the
+ EVBUFFER_DATA() function still works. Watch out, though: it needs to copy
+   the buffer's contents into a linear chunk before you can use it.
+
+4.2. More flexible readline support
+
+ The old evbuffer_readline() function (which accepted any sequence of
+ CR and LF characters as a newline, and which couldn't handle lines
+ containing NUL characters), is now deprecated. The preferred
+ function is evbuffer_readln(), which supports a variety of
+ line-ending styles, and which can return the number of characters in
+ the line returned.
+
+ You can also call evbuffer_search_eol() to find the end of a line
+ in an evbuffer without ever extracting the line.
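+
+   For example, to pull one CRLF-terminated line out of a buffer (sketch;
+   'buf' is an existing evbuffer):
+
+       size_t len;
+       char *line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF);
+       if (line) {
+           /* 'line' is NUL-terminated; 'len' excludes the line ending. */
+           free(line);
+       }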
+
+4.3. Support for file-based IO in evbuffers.
+
+ You can now add chunks of a file into a evbuffer, and Libevent will have
+ your OS use mapped-memory functionality, sendfile, or splice to transfer
+ the data without ever copying it to userspace. On OSs where this is not
+ supported, Libevent just loads the data.
+
+ There are probably some bugs remaining in this code. On some platforms
+ (like Windows), it just reads the relevant parts of the file into RAM.
+
+4.4. Support for zero-copy ("scatter/gather") writes in evbuffers.
+
+ You can add a piece of memory to an evbuffer without copying it.
+ Instead, Libevent adds a new element to the evbuffer's linked list of
+ chunks with a pointer to the memory you supplied. You can do this
+ either with a reference-counted chunk (via evbuffer_add_reference), or
+ by asking Libevent for a pointer to its internal vectors (via
+ evbuffer_reserve_space or evbuffer_peek()).
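+
+   For example, evbuffer_add_reference() takes a cleanup callback that runs
+   once the referenced memory is no longer needed (sketch; 'buf' is an
+   existing evbuffer):
+
+       static void cleanup_cb(const void *data, size_t datalen, void *arg)
+       {
+           free(arg);  /* the buffer no longer references this block */
+       }
+       ...
+       char *block = strdup("some payload");
+       evbuffer_add_reference(buf, block, strlen(block), cleanup_cb, block);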
+
+4.5. Multiple callbacks per evbuffer
+
+ Previously, you could only have one callback active on an evbuffer at a
+ time. In practice, this meant that if one part of Libevent was using an
+ evbuffer callback to notice when an internal evbuffer was reading or
+ writing data, you couldn't have your own callback on that evbuffer.
+
+   Now you can use the evbuffer_add_cb() function to add a callback that
+ does not interfere with any other callbacks.
+
+ The evbuffer_setcb() function is now deprecated.
+
+4.6. New callback interface
+
+ Previously, evbuffer callbacks were invoked with the old size of the
+ buffer and the new size of the buffer. This interface could not capture
+ operations that simultaneously filled _and_ drained a buffer, or handle
+ cases where we needed to postpone callbacks until multiple operations were
+ complete.
+
+ Callbacks that are set with evbuffer_setcb still use the old API.
+ Callbacks added with evbuffer_add_cb() use a new interface that takes a
+   pointer to a struct holding the total number of bytes added and the
+   total number of bytes drained. See event2/buffer.h for full details.
+
+4.7. Misc new evbuffer features
+
+   You can use evbuffer_remove_buffer() to move a given number of bytes from
+   one buffer to another.
+
+ The evbuffer_search() function lets you search for repeated instances of
+ a pattern inside an evbuffer.
+
+ You can use evbuffer_freeze() to temporarily suspend drains from or adds
+ to a given evbuffer. This is useful for code that exposes an evbuffer as
+ part of its public API, but wants users to treat it as a pure source or
+ sink.
+
+ There's an evbuffer_copyout() that looks at the data at the start of an
+ evbuffer without doing a drain.
+
+ You can have an evbuffer defer all of its callbacks, so that rather than
+ being invoked immediately when the evbuffer's length changes, they are
+ invoked from within the event_loop. This is useful when you have a
+ complex set of callbacks that can change the length of other evbuffers,
+ and you want to avoid having them recurse and overflow your stack.
+
+5. Bufferevents improvements
+
+ Libevent has long included a "bufferevents" structure and related
+ functions that were useful for generic buffered IO on a TCP connection.
+ This is what Libevent uses for its HTTP implementation. In addition to
+ the improvements that they get for free from the underlying evbuffer
+ implementation above, there are many new features in Libevent 2.0's
+   bufferevents.
+
+5.1. New OO implementations
+
+ The "bufferevent" structure is now an abstract base type with multiple
+ implementations. This should not break existing code, which always
+ allocated bufferevents with bufferevent_new().
+
+ Current implementations of the bufferevent interface are described below.
+
+5.2. bufferevent_socket_new() replaces bufferevent_new()
+
+ Since bufferevents that use a socket are not the only kind,
+ bufferevent_new() is now deprecated. Use bufferevent_socket_new()
+ instead.
+
+5.3. Filtered bufferevent IO
+
+ You can use bufferevent_filter_new() to create a bufferevent that wraps
+ around another bufferevent and transforms data it is sending and
+ receiving. See test/regress_zlib.c for a toy example that uses zlib to
+ compress data before sending it over a bufferevent.
+
+5.4. Linked pairs of bufferevents
+
+ You can use bufferevent_pair_new() to produce two linked
+ bufferevents. This is like using socketpair, but doesn't require
+ system-calls.
+
+5.5. SSL support for bufferevents with OpenSSL
+
+ There is now a bufferevent type that supports SSL/TLS using the
+   OpenSSL library. The code for this is built as a separate
+ library, libevent_openssl, so that your programs don't need to
+ link against OpenSSL unless they actually want SSL support.
+
+ There are two ways to construct one of these bufferevents, both
+ declared in <event2/bufferevent_ssl.h>. If you want to wrap an
+ SSL layer around an existing bufferevent, you would call the
+ bufferevent_openssl_filter_new() function. If you want to do SSL
+ on a socket directly, call bufferevent_openssl_socket_new().
+
+5.6. IOCP support for bufferevents on Windows
+
+ There is now a bufferevents backend that supports IOCP on Windows.
+ Supposedly, this will eventually make Windows IO much faster for
+ programs using bufferevents. We'll have to see; the code is not
+ currently optimized at all. To try it out, call the
+   event_base_start_iocp() method on an event_base before constructing
+ bufferevents.
+
+ This is tricky code; there are probably some bugs hiding here.
+
+5.7. Improved connect support for bufferevents.
+
+ You can now create a bufferevent that is not yet connected to any
+ host, and tell it to connect, either by address or by hostname.
+
+ The functions to do this are bufferevent_socket_connect and
+ bufferevent_socket_connect_hostname.
+
+5.8. Rate-limiting for bufferevents
+
+ If you need to limit the number of bytes read/written by a single
+ bufferevent, or by a group of them, you can do this with a new set of
+ bufferevent rate-limiting calls.
+
+6. Other improvements
+
+6.1. DNS improvements
+
+6.1.1. DNS: IPv6 nameservers
+
+ The evdns code now lets you have nameservers whose addresses are IPv6.
+
+6.1.2. DNS: Better security
+
+ Libevent 2.0 tries harder to resist DNS answer-sniping attacks than
+ earlier versions of evdns. See comments in the code for full details.
+
+ Notably, evdns now supports the "0x20 hack" to make it harder to
+ impersonate a DNS server. Additionally, Libevent now uses a strong
+ internal RNG to generate DNS transaction IDs, so you don't need to supply
+ your own.
+
+6.1.3. DNS: Getaddrinfo support
+
+ There's now an asynchronous getaddrinfo clone, evdns_getaddrinfo(),
+ to make the results of the evdns functions more usable. It doesn't
+ support every feature of a typical platform getaddrinfo() yet, but it
+ is quite close.
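+
+   A lookup looks roughly like this (sketch; 'dns_base' is an evdns_base
+   created elsewhere, and declarations come from event2/dns.h and
+   event2/util.h):
+
+       static void gai_cb(int err, struct evutil_addrinfo *ai, void *arg)
+       {
+           if (err == 0) {
+               /* ... walk the evutil_addrinfo list ... */
+               evutil_freeaddrinfo(ai);
+           }
+       }
+       ...
+       struct evutil_addrinfo hints;
+       memset(&hints, 0, sizeof(hints));
+       hints.ai_family = AF_UNSPEC;
+       hints.ai_socktype = SOCK_STREAM;
+       evdns_getaddrinfo(dns_base, "www.example.com", "80", &hints,
+           gai_cb, NULL);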
+
+ There is also a blocking evutil_getaddrinfo() declared in
+ event2/util.h, to provide a getaddrinfo() implementation for
+ platforms that don't have one, and smooth over the differences in
+   various platforms' implementations of RFC3493.
+
+   Bufferevents provide bufferevent_socket_connect_hostname(), which combines
+ the name lookup and connect operations.
+
+6.1.4. DNS: No more evdns globals
+
+ Like an event base, evdns operations are now supposed to use an evdns_base
+ argument. This makes them easier to wrap for other (more OO) languages,
+ and easier to control the lifetime of. The old evdns functions will
+ still, of course, continue working.
+
+6.2. Listener support
+
+ You can now more easily automate setting up a bound socket to listen for
+ TCP connections. Just use the evconnlistener_*() functions in the
+ event2/listener.h header.
+
+ The listener code supports IOCP on Windows if available.
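+
+   A minimal setup might look like this (sketch; 'base', the accept_cb
+   callback, and a filled-in 'sin' sockaddr_in are assumed to exist):
+
+       #include <event2/listener.h>
+
+       struct evconnlistener *lev;
+       lev = evconnlistener_new_bind(base, accept_cb, NULL,
+           LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
+           (struct sockaddr *)&sin, sizeof(sin));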
+
+6.3. Secure RNG support
+
+ Network code very frequently needs a secure, hard-to-predict random number
+ generator. Some operating systems provide a good C implementation of one;
+ others do not. Libevent 2.0 now provides a consistent implementation
+ based on the arc4random code originally from OpenBSD. Libevent (and you)
+ can use the evutil_secure_rng_*() functions to access a fairly secure
+ random stream of bytes.
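+
+   For example, to fill a small buffer with random bytes (sketch; the
+   declaration is in event2/util.h):
+
+       unsigned char nonce[16];
+       evutil_secure_rng_get_bytes(nonce, sizeof(nonce));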
+
+6.4. HTTP
+
+ The evhttp uriencoding and uridecoding APIs have updated versions
+ that behave more correctly, and can handle strings with internal NULs.
+
+ The evhttp query parsing and URI parsing logic can now detect errors
+ more usefully. Moreover, we include an actual URI parsing function
+ (evhttp_uri_parse()) to correctly parse URIs, so as to discourage
+ people from rolling their own ad-hoc parsing functions.
+
+   There are now accessor functions for the useful fields of struct
+   evhttp_request and friends; it shouldn't be necessary to access them
+   directly any
+ more.
+
+ Libevent now lets you declare support for all specified HTTP methods,
+ including OPTIONS, PATCH, and so on. The default list is unchanged.
+
+ Numerous evhttp bugs also got fixed.
+
+7. Infrastructure improvements
+
+7.1. Better unit test framework
+
+ We now use a unit test framework that Nick wrote called "tinytest".
+ The main benefit from Libevent's point of view is that tests which
+ might mess with global state can all run each in their own
+ subprocess. This way, when there's a bug that makes one unit test
+ crash or mess up global state, it doesn't affect any others.
+
+7.2. Better unit tests
+
+ Despite all the code we've added, our unit tests are much better than
+ before. Right now, iterating over the different backends on various
+ platforms, I'm getting between 78% and 81% test coverage, compared
+ with less than 45% test coverage in Libevent 1.4.
+
diff --git a/libs/libevent/docs/whatsnew-2.1.txt b/libs/libevent/docs/whatsnew-2.1.txt
new file mode 100644
index 0000000000..0be54ae11b
--- /dev/null
+++ b/libs/libevent/docs/whatsnew-2.1.txt
@@ -0,0 +1,690 @@
+ What's new in Libevent 2.1
+ Nick Mathewson
+
+0. Before we start
+
+0.1. About this document
+
+ This document describes the key differences between Libevent 2.0 and
+ Libevent 2.1, from a user's point of view. It's a work in progress.
+
+ For better documentation about libevent, see the links at
+ http://libevent.org/
+
+ Libevent 2.1 would not be possible without the generous help of
+ numerous volunteers. For a list of who did what in Libevent 2.1,
+ please see the ChangeLog!
+
+   NOTE: I am very sure that I missed some things on this list. Caveat
+ haxxor.
+
+0.2. Where to get help
+
+ Try looking at the other documentation too. All of the header files
+ have documentation in the doxygen format; this gets turned into nice
+ HTML and linked to from the libevent.org website.
+
+ There is a work-in-progress book with reference manual at
+ http://www.wangafu.net/~nickm/libevent-book/ .
+
+ You can ask questions on the #libevent IRC channel at irc.oftc.net or
+ on the mailing list at libevent-users@freehaven.net. The mailing list
+ is subscribers-only, so you will need to subscribe before you post.
+
+0.3. Compatibility
+
+ Our source-compatibility policy is that correct code (that is to say,
+ code that uses public interfaces of Libevent and relies only on their
+ documented behavior) should have forward source compatibility: any
+ such code that worked with a previous version of Libevent should work
+ with this version too.
+
+ We don't try to do binary compatibility except within stable release
+ series, so binaries linked against any version of Libevent 2.0 will
+ probably need to be recompiled against Libevent 2.1.4-alpha if you
+ want to use it. It is probable that we'll break binary compatibility
+ again before Libevent 2.1 is stable.
+
+1. New APIs and features
+
+1.1. New ways to build libevent
+
+ We now provide an --enable-gcc-hardening configure option to turn on
+ GCC features designed for increased code security.
+
+ There is also an --enable-silent-rules configure option to make
+ compilation run more quietly with automake 1.11 or later.
+
+ You no longer need to use the --enable-gcc-warnings option to turn on
+ all of the GCC warnings that Libevent uses. The only change from
+ using that option now is to turn warnings into errors.
+
+ For IDE users, files that are not supposed to be built are now
+ surrounded with appropriate #ifdef lines to keep your IDE from getting
+ upset.
+
+ There is now an alternative cmake-based build process; cmake users
+ should see the relevant sections in the README.
+
+
+1.2. New functions for events and the event loop
+
+ If you're running Libevent with multiple event priorities, you might
+ want to make sure that Libevent checks for new events frequently, so
+ that time-consuming or numerous low-priority events don't keep it from
+ checking for new high-priority events. You can now use the
+ event_config_set_max_dispatch_interval() interface to ensure that the
+ loop checks for new events either every N microseconds, every M
+ callbacks, or both.
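+
+   For example, to have the loop check for new events at least every 10
+   milliseconds and after at most 16 callbacks (a minimal sketch):
+
+       struct event_config *cfg = event_config_new();
+       struct timeval max_interval = { 0, 10*1000 };
+       event_config_set_max_dispatch_interval(cfg, &max_interval,
+           16 /* max callbacks */, 0 /* min priority affected */);
+       struct event_base *base = event_base_new_with_config(cfg);
+       event_config_free(cfg);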
+
+ When configuring an event base, you can now choose whether you want
+ timers to be more efficient, or more precise. (This only has effect
+ on Linux for now.) Timers are efficient by default: to select more
+ precise timers, use the EVENT_BASE_FLAG_PRECISE_TIMER flag when
+ constructing the event_config, or set the EVENT_PRECISE_TIMER
+ environment variable to a non-empty string.
+
+ There is an EVLOOP_NO_EXIT_ON_EMPTY flag that tells event_base_loop()
+ to keep looping even when there are no pending events. (Ordinarily,
+ event_base_loop() will exit as soon as no events are pending.)
+
+ Past versions of Libevent have been annoying to use with some
+ memory-leak-checking tools, because Libevent allocated some global
+ singletons but provided no means to free them. There is now a
+ function, libevent_global_shutdown(), that you can use to free all
+ globally held resources before exiting, so that your leak-check tools
+ don't complain. (Note: this function doesn't free non-global things
+ like events, bufferevents, and so on; and it doesn't free anything
+ that wouldn't otherwise get cleaned up by the operating system when
+ your process exit()s. If you aren't using a leak-checking tool, there
+ is not much reason to call libevent_global_shutdown().)
+
+ There is a new event_base_get_npriorities() function to return the
+ number of priorities set in the event base.
+
+ Libevent 2.0 added an event_new() function to construct a new struct
+ event on the heap. Unfortunately, with event_new(), there was no
+ equivalent for:
+
+ struct event ev;
+ event_assign(&ev, base, fd, EV_READ, callback, &ev);
+
+ In other words, there was no easy way for event_new() to set up an
+ event so that the event itself would be its callback argument.
+ Libevent 2.1 lets you do this by passing "event_self_cbarg()" as the
+ callback argument:
+
+ struct event *evp;
+ evp = event_new(base, fd, EV_READ, callback,
+ event_self_cbarg());
+
+ There's also a new event_base_get_running_event() function you can
+ call from within a Libevent callback to get a pointer to the current
+ event. This should never be strictly necessary, but it's sometimes
+ convenient.
+
+ The event_base_once() function used to leak some memory if the event
+ that it added was never actually triggered. Now, its memory is
+ tracked in the event_base and freed when the event_base is freed.
+   Note, however, that Libevent doesn't know how to free any information
+   passed as the callback argument to event_base_once(); that is still
+   something you may need to deallocate yourself.
+
+ There is an event_get_priority() function to return an event's
+ priority.
+
+ By analogy to event_base_loopbreak(), there is now an
+ event_base_loopcontinue() that tells Libevent to stop processing
+ active event callbacks, and re-scan for new events right away.
+
+ There's a function, event_base_foreach_event(), that can iterate over
+ every event currently pending or active on an event base, and invoke a
+   user-supplied callback on each. The callback must not alter the events,
+   or add anything to or remove anything from the event base.
+
+ We now have an event_remove_timer() function to remove the timeout on
+ an event while leaving its socket and/or signal triggers unchanged.
+ (If we were designing the API from scratch, this would be the behavior
+ of "event_add(ev, NULL)" on an already-added event with a timeout. But
+ that's a no-op in past versions of Libevent, and we don't want to
+ break compatibility.)
+
+ You can use the new event_base_get_num_events() function to find the
+ number of events active or pending on an event_base. To find the
+ largest number of events that there have been since the last call, use
+ event_base_get_max_events().
+
+ You can now activate all the events waiting for a given fd or signal
+ using the event_base_active_by_fd() and event_base_active_by_signal()
+ APIs.
+
+ On backends that support it (currently epoll), there is now an
+ EV_CLOSED flag that programs can use to detect when a socket has
+ closed without having to read all the bytes until receiving an EOF.
+
+1.3. Event finalization
+
+ [NOTE: This is an experimental feature in Libevent 2.1.3-alpha. Though
+ it seems solid so far, its API might change between now and the first
+ release candidate for Libevent 2.1.]
+
+1.3.1. Why event finalization?
+
+ Libevent 2.1 now supports an API for safely "finalizing" events that
+ might be running in multiple threads, and provides a way to slightly
+ change the semantics of event_del() to prevent deadlocks in
+ multithreaded programs.
+
+ To motivate this feature, consider the following code, in the context
+   of a multithreaded Libevent application:
+
+ struct connection *conn = event_get_callback_arg(ev);
+ event_del(ev);
+ connection_free(conn);
+
+ Suppose that the event's callback might be running in another thread,
+ and using the value of "conn" concurrently. We wouldn't want to
+ execute the connection_free() call until "conn" is no longer in use.
+ How can we make this code safe?
+
+ Libevent 2.0 answered that question by saying that the event_del()
+ call should block if the event's callback is running in another
+ thread. That way, we can be sure that event_del() has canceled the
+ callback (if the callback hadn't started running yet), or has waited
+ for the callback to finish.
+
+ But now suppose that the data structure is protected by a lock, and we
+ have the following code:
+
+ void check_disable(struct connection *connection) {
+ lock(connection);
+ if (should_stop_reading(connection))
+ event_del(connection->read_event);
+ unlock(connection);
+ }
+
+ What happens when we call check_disable() from a callback and from
+ another thread? Let's say that the other thread gets the lock
+ first. If it decides to call event_del(), it will wait for the
+ callback to finish. But meanwhile, the callback will be waiting for
+   the lock on the connection. Since each thread is waiting for the
+ other one to release a resource, the program will deadlock.
+
+ This bug showed up in multithreaded bufferevent programs in 2.1,
+ particularly when freeing bufferevents. (For more information, see
+ the "Deadlock when calling bufferevent_free from an other thread"
+ thread on libevent-users starting on 6 August 2012 and running through
+ February of 2013. You might also like to read my earlier writeup at
+ http://archives.seul.org/libevent/users/Feb-2012/msg00053.html and
+ the ensuing discussion.)
+
+1.3.2. The EV_FINALIZE flag and avoiding deadlock
+
+ To prevent the deadlock condition described above, Libevent
+ 2.1.3-alpha adds a new flag, "EV_FINALIZE". You can pass it to
+ event_new() and event_assign() along with EV_READ, EV_WRITE, and the
+ other event flags.
+
+ When an event is constructed with the EV_FINALIZE flag, event_del()
+ will not block on that event, even when the event's callback is
+ running in another thread. By using EV_FINALIZE, you are therefore
+ promising not to use the "event_del(ev); free(event_get_callback_arg(ev));"
+ pattern, but rather to use one of the finalization functions below to
+ clean up the event.
+
+ EV_FINALIZE has no effect on a single-threaded program, or on a
+ program where events are only used from one thread.
+
+
+ There are also two new variants of event_del() that you can use for
+ more fine-grained control:
+ event_del_noblock(ev)
+ event_del_block(ev)
+ The event_del_noblock() function will never block, even if the event
+ callback is running in another thread and doesn't have the EV_FINALIZE
+ flag. The event_del_block() function will _always_ block if the event
+ callback is running in another thread, even if the event _does_ have
+ the EV_FINALIZE flag.
+
+ [A future version of Libevent may have a way to make the EV_FINALIZE
+ flag the default.]
+
+1.3.3. Safely finalizing events
+
+ To safely tear down an event that may be running, Libevent 2.1.3-alpha
+ introduces event_finalize() and event_free_finalize(). You call them
+ on an event, and provide a finalizer callback to be run on the event
+ and its callback argument once the event is definitely no longer
+ running.
+
+ With event_free_finalize(), the event is also freed once the finalizer
+ callback has been invoked.
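+
+   Continuing the connection example above, a teardown might look like this
+   (a sketch only; the API is still experimental):
+
+       static void conn_finalizer(struct event *ev, void *arg)
+       {
+           connection_free(arg);  /* safe: the callback can no longer run */
+       }
+       ...
+       event_free_finalize(0, connection->read_event, conn_finalizer);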
+
+ A finalized event cannot be re-added or activated. The finalizer
+ callback must not add events, activate events, or attempt to
+ "resucitate" the event being finalized in any way.
+
+ If any finalizer callbacks are pending as the event_base is being
+ freed, they will be invoked. You can override this behavior with the
+ new function event_base_free_nofinalize().
+
+1.4. New debugging features
+
+ You can now turn on debug logs at runtime using a new function,
+ event_enable_debug_logging().
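+
+   For example (sketch; EVENT_DBG_ALL is the mask declared in
+   event2/event.h):
+
+       event_enable_debug_logging(EVENT_DBG_ALL);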
+
+ The event_enable_lock_debugging() function is now spelled correctly.
+ You can still use the old "event_enable_lock_debuging" name, though,
+   so your old programs shouldn't break.
+
+ There's also been some work done to try to make the debugging logs
+ more generally useful.
+
+1.5. New evbuffer functions
+
+ In Libevent 2.0, we introduced evbuffer_add_file() to add an entire
+ file's contents to an evbuffer, and then send them using sendfile() or
+ mmap() as appropriate. This API had some drawbacks, however.
+ Notably, it created one mapping or fd for every instance of the same
+ file added to any evbuffer. Also, adding a file to an evbuffer could
+ make that buffer unusable with SSL bufferevents, filtering
+ bufferevents, and any code that tried to read the contents of the
+ evbuffer.
+
+ Libevent 2.1 adds a new evbuffer_file_segment API to solve these
+ problems. Now, you can use evbuffer_file_segment_new() to construct a
+ file-segment object, and evbuffer_add_file_segment() to insert it (or
+ part of it) into an evbuffer. These segments avoid creating redundant
+ maps or fds. Better still, the code is smart enough (when the OS
+ supports sendfile) to map the file when that's necessary, and use
+ sendfile() otherwise.
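+
+   Usage looks roughly like this (sketch; 'fd' is an open file and 'buf' an
+   existing evbuffer; error handling omitted):
+
+       struct evbuffer_file_segment *seg;
+       seg = evbuffer_file_segment_new(fd, 0, -1, 0);   /* the whole file */
+       evbuffer_add_file_segment(buf, seg, 0, -1);
+       evbuffer_file_segment_free(seg);   /* buf keeps its own reference */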
+
+ File segments can receive callback functions that are invoked when the
+ file segments are freed.
+
+ The evbuffer_ptr interface has been extended so that an evbuffer_ptr
+ can now yield a point just after the end of the buffer. This makes
+ many algorithms simpler to implement.
+
+   There's a new evbuffer_add_buffer_reference() interface that you can use
+   to add one buffer to another nondestructively. When you say
+ evbuffer_add_buffer_reference(outbuf, inbuf), outbuf now contains a
+ reference to the contents of inbuf.
+
+ To aid in adding data in bulk while minimizing evbuffer calls, there
+ is an evbuffer_add_iovec() function.
+
+ There's a new evbuffer_copyout_from() variant function to enable
+ copying data nondestructively from the middle of a buffer.
+
+ evbuffer_readln() now supports an EVBUFFER_EOL_NUL argument to fetch
+ NUL-terminated strings from buffers.
+
+1.6. New functions and features: bufferevents
+
+ You can now use the bufferevent_getcb() function to find out a
+ bufferevent's callbacks. Previously, there was no supported way to do
+ that.
+
+ The largest chunk readable or writeable in a single bufferevent
+ callback is no longer hardcoded; it's now configurable with
+ the new functions bufferevent_set_max_single_read() and
+ bufferevent_set_max_single_write().
+
+ For consistency, OpenSSL bufferevents now make sure to always set one
+ of BEV_EVENT_READING or BEV_EVENT_WRITING when invoking an event
+ callback.
+
+ Calling bufferevent_set_timeouts(bev, NULL, NULL) now removes the
+ timeouts from socket and ssl bufferevents correctly.
+
+ You can find the priority at which a bufferevent runs with
+ bufferevent_get_priority().
+
+ The function bufferevent_get_token_bucket_cfg() can retrieve the
+ rate-limit settings for a bufferevent; bufferevent_getwatermark() can
+ return a bufferevent's current watermark settings.
+
+ You can manually trigger a bufferevent's callbacks via
+ bufferevent_trigger() and bufferevent_trigger_event().
+
+1.7. New functions and features: evdns
+
+ The previous evdns interface used an "open a test UDP socket" trick in
+ order to detect IPv6 support. This was a hack, since it would
+ sometimes badly confuse people's firewall software, even though no
+ packets were sent. The current evdns interface-detection code uses
+ the appropriate OS functions to see which interfaces are configured.
+
+ The evdns_base_new() function now has multiple possible values for its
+   second (flags) argument. The values 1 and 0 keep their old meanings,
+   though 1 now has the symbolic name EVDNS_BASE_INITIALIZE_NAMESERVERS.
+ A second flag is now supported too: the EVDNS_BASE_DISABLE_WHEN_INACTIVE
+ flag, which tells the evdns_base that it should not prevent Libevent from
+ exiting while it has no DNS requests in progress.
+
+ There is a new evdns_base_clear_host_addresses() function to remove
+ all the /etc/hosts addresses registered with an evdns instance.
+
+1.8. New functions and features: evconnlistener
+
+ Libevent 2.1 adds the following evconnlistener flags:
+
+ LEV_OPT_DEFERRED_ACCEPT -- Tells the OS that it doesn't need to
+ report sockets as having arrived until the initiator has sent some
+ data too. This can greatly improve performance with protocols like
+ HTTP where the client always speaks first. On operating systems
+ that don't support this functionality, this option has no effect.
+
+ LEV_OPT_DISABLED -- Creates an evconnlistener in the disabled (not
+ listening) state.
+
+ Libevent 2.1 changes the behavior of the LEV_OPT_CLOSE_ON_EXEC
+ flag. Previously, it would apply to the listener sockets, but not to
+ the accepted sockets themselves. That's almost never what you want.
+ Now, it applies both to the listener and the accepted sockets.
+
+1.9. New functions and features: evhttp
+
+ **********************************************************************
+ NOTE: The evhttp module will eventually be deprecated in favor of Mark
+ Ellzey's libevhtp library. Don't worry -- this won't happen until
+ libevhtp provides every feature that evhttp does, and provides a
+ compatible interface that applications can use to migrate.
+ **********************************************************************
+
+ Previously, you could only set evhttp timeouts in increments of one
+ second. Now, you can use evhttp_set_timeout_tv() and
+ evhttp_connection_set_timeout_tv() to configure
+ microsecond-granularity timeouts.
+
+ There are a new pair of functions: evhttp_set_bevcb() and
+ evhttp_connection_base_bufferevent_new(), that you can use to
+ configure which bufferevents will be used for incoming and outgoing
+ http connections respectively. These functions, combined with SSL
+ bufferevents, should enable HTTPS support.
+
+ There's a new evhttp_foreach_bound_socket() function to iterate over
+ every listener on an evhttp object.
+
+ Whitespace between lines in headers is now folded into a single space;
+ whitespace at the end of a header is now removed.
+
+ The socket errno value is now preserved when invoking an http error
+ callback.
+
+ There's a new kind of request callback for errors; you can set it with
+ evhttp_request_set_error_cb(). It gets called when there's a request error,
+ and actually reports the error code and lets you figure out which request
+ failed.
+
+ You can navigate from an evhttp_connection back to its evhttp with the
+ new evhttp_connection_get_server() function.
+
+ You can override the default HTTP Content-Type with the new
+   evhttp_set_default_content_type() function.
+
+ There's a new evhttp_connection_get_addr() API to return the peer
+ address of an evhttp_connection.
+
+ The new evhttp_send_reply_chunk_with_cb() is a variant of
+ evhttp_send_reply_chunk() with a callback to be invoked when the
+ chunk is sent.
+
+ The evhttp_request_set_header_cb() facility adds a callback to be
+ invoked while parsing headers.
+
+ The evhttp_request_set_on_complete_cb() facility adds a callback to be
+ invoked on request completion.
+
+1.10. New functions and features: evutil
+
+ There's a function "evutil_secure_rng_set_urandom_device_file()" that
+ you can use to override the default file that Libevent uses to seed
+ its (sort-of) secure RNG.
+
+2. Cross-platform performance improvements
+
+2.1. Better data structures
+
+ We replaced several users of the sys/queue.h "TAILQ" data structure
+ with the "LIST" data structure. Because this data type doesn't
+ require FIFO access, it requires fewer pointer checks and
+ manipulations to keep it in line.
+
+ All previous versions of Libevent have kept every pending (added)
+ event in an "eventqueue" data structure. Starting in Libevent 2.0,
+ however, this structure became redundant: every pending timeout event
+ is stored in the timeout heap or in one of the common_timeout queues,
+ and every pending fd or signal event is stored in an evmap. Libevent
+ 2.1 removes this data structure, and thereby saves all of the code
+ that we'd been using to keep it updated.
+
+2.2. Faster activations and timeouts
+
+ It's a common pattern in older code to use event_base_once() with a
+ 0-second timeout to ensure that a callback will get run 'as soon as
+ possible' in the current iteration of the Libevent loop. We optimize
+ this case by calling event_active() directly, and bypassing the
+ timeout pool. (People who are using this pattern should also consider
+ using event_active() themselves.)
+
+ Libevent 2.0 would wake up a polling event loop whenever the first
+ timeout in the event loop was adjusted--whether it had become earlier
+ or later. We now only notify the event loop when a change causes the
+ expiration time to become _sooner_ than it would have been otherwise.
+
+ The timeout heap code is now optimized to perform fewer comparisons
+ and shifts when changing or removing a timeout.
+
+ Instead of checking for a wall-clock time jump every time we call
+ clock_gettime(), we now check only every 5 seconds. This should save
+ a huge number of gettimeofday() calls.
+
+2.3. Microoptimizations
+
+   Internal event list maintenance no longer uses the antipattern where
+ we have one function with multiple totally independent behaviors
+ depending on an argument:
+ #define OP1 1
+ #define OP2 2
+ #define OP3 3
+ void func(int operation, struct event *ev) {
+          switch (operation) {
+ ...
+ }
+ }
+ Instead, these functions are now split into separate functions for
+ each operation:
+ void func_op1(struct event *ev) { ... }
+ void func_op2(struct event *ev) { ... }
+ void func_op3(struct event *ev) { ... }
+
+ This produces better code generation and inlining decisions on some
+ compilers, and makes the code easier to read and check.
+
+2.4. Evbuffer performance improvements
+
+ The EVBUFFER_EOL_CRLF line-ending type is now much faster, thanks to
+ smart optimizations.
+
+2.5. HTTP performance improvements
+
+ o Performance tweak to evhttp_parse_request_line. (aee1a97 Mark Ellzey)
+ o Add missing break to evhttp_parse_request_line (0fcc536)
+
+2.6. Coarse timers by default on Linux
+
+ Due to limitations of the epoll interface, Libevent programs using epoll
+ have not previously been able to wait for timeouts with accuracy smaller
+ than 1 millisecond. But Libevent had been using CLOCK_MONOTONIC for
+ timekeeping on Linux, which is needlessly expensive: CLOCK_MONOTONIC_COARSE
+ has approximately the resolution corresponding to epoll, and is much faster
+ to invoke than CLOCK_MONOTONIC.
+
+   To disable coarse timers and get more precise timing, use the
+ new EVENT_BASE_FLAG_PRECISE_TIMER flag when setting up your event base.
+
+3. Backend/OS-specific improvements
+
+3.1. Linux-specific improvements
+
+   The logic for deciding which arguments to use with epoll_ctl() is now
+ a table-driven lookup, rather than the previous pile of cascading
+ branches. This should minimize epoll_ctl() calls and make the epoll
+ code run a little faster on change-heavy loads.
+
+ Libevent now takes advantage of Linux's support for enhanced APIs
+ (e.g., SOCK_CLOEXEC, SOCK_NONBLOCK, accept4, pipe2) that allow us to
+ simultaneously create a socket, make it nonblocking, and make it
+ close-on-exec. This should save syscalls throughout our codebase, and
+   avoid race conditions if an exec() occurs after a socket is created but
+   before we can make it close-on-exec.
+
+3.2. Windows-specific improvements
+
+ We now use GetSystemTimeAsFileTime to implement gettimeofday. It's
+ significantly faster and more accurate than our old ftime()-based approach.
+
+3.3. Improvements in the solaris evport backend.
+
+ The evport backend has been updated to use many of the infrastructure
+ improvements from Libevent 2.0. Notably, it keeps track of per-fd
+ information using the evmap infrastructure, and removes a number of
+ linear scans over recently-added events. This last change makes it
+ efficient to receive many more events per evport_getn() call, thereby
+ reducing evport overhead in general.
+
+3.4. OSX backend improvements
+
+ The OSX select backend doesn't like to have more than a certain number
+ of fds set unless an "unlimited select" option has been set.
+ Therefore, we now set it.
+
+3.5. Monotonic clocks on even more platforms
+
+ Libevent previously used a monotonic clock for its internal timekeeping
+ only on platforms supporting the POSIX clock_gettime() interface. Now,
+ Libevent has support for monotonic clocks on OSX and Windows too, and a
+ fallback implementation for systems without monotonic clocks that will at
+ least keep time running forwards.
+
+ Using monotonic timers makes Libevent more resilient to changes in the
+ system time, as can happen in small amounts due to clock adjustments from
+ NTP, or in large amounts due to users who move their system clocks all over
+ the timeline in order to keep nagware from nagging them.
+
+3.6. Faster cross-thread notification on kqueue
+
+ When a thread other than the one in which the main event loop is
+ running needs to wake the thread running the main event loop, Libevent
+ usually writes to a socketpair in order to force the main event loop
+ to wake up. On Linux, we've been able to use eventfd() instead. Now
+   on BSD and OSX systems (and anywhere else that has kqueue with the
+ EVFILT_USER extension), we can use EVFILT_USER to wake up the main
+ thread from kqueue. This should be a tiny bit faster than the
+ previous approach.
+
+4. Infrastructure improvements
+
+4.1. Faster tests
+
+ I've spent some time to try to make the unit tests run faster in
+ Libevent 2.1. Nearly all of this was a matter of searching slow tests
+ for unreasonably long timeouts, and cutting them down to reasonably
+   long delays, though in one or two cases I actually had to parallelize
+ an operation or improve an algorithm.
+
+ On my desktop, a full "make verify" run of Libevent 2.0.18-stable
+ requires about 218 seconds. Libevent 2.1.1-alpha cuts this down to
+ about 78 seconds.
+
+ Faster unit tests are great, since they let programmers test their
+ changes without losing their train of thought.
+
+4.2. Finicky tests are now off-by-default
+
+ The Tinytest unit testing framework now supports optional tests, and
+ Libevent uses them. By default, Libevent's unit testing framework
+ does not run tests that require a working network, and does not run
+ tests that tend to fail on heavily loaded systems because of timing
+ issues. To re-enable all tests, run ./test/regress using the "@all"
+ alias.
+
+4.3. Modernized use of autotools
+
+ Our autotools-based build system has been updated to build without
+ warnings on recent autoconf/automake versions.
+
+ Libevent's autotools makefiles are no longer recursive. This allows
+ make to use the maximum possible parallelism to do the minimally
+ necessary amount of work. See Peter Miller's "Recursive Make
+ Considered Harmful" at http://miller.emu.id.au/pmiller/books/rmch/ for
+ more information here.
+
+  We now use the "quiet build" option to suppress distracting messages
+  about which command lines are being run.  You can get them back with
+ "make V=1".
+
+4.4. Portability
+
+  Libevent now uses large-file support internally on platforms where it
+  matters.  You shouldn't need to set _LARGEFILE_SOURCE, _FILE_OFFSET_BITS,
+  or anything else magic before including the Libevent headers, either:
+  Libevent now sets the size of ev_off_t to the size of off_t that it
+  saw at compile time, not to some (possibly different) size based on
+  whatever macro definitions are in effect when your program is built.
+
+  We now also use the Autoconf AC_USE_SYSTEM_EXTENSIONS mechanism to
+  turn on the per-system macros needed for features that are not enabled
+  by default.  Unlike the rest of the autoconf macros, we output these to
+  an internal-use-only evconfig-private.h header, since their names need
+  to survive unmangled.  This lets us build correctly on more platforms,
+  and avoid inconsistencies when some files define _GNU_SOURCE and
+  others don't.
+
+ Libevent now tries to detect OpenSSL via pkg-config.
+
+4.5. Standards conformance
+
+ Previous Libevent versions had no consistent convention for internal
+ vs external identifiers, and used identifiers starting with the "_"
+ character throughout the codebase. That's no good, since the C
+ standard says that identifiers beginning with _ are reserved. I'm not
+  aware of any collisions with system identifiers, but it's best
+ to fix these things before they cause trouble.
+
+  We now avoid all use of these _-prefixed identifiers in the Libevent
+  source code.  These changes were made *mainly* with automated scripts,
+ so there shouldn't be any mistakes, but you never know.
+
+  As an exception, the names _EVENT_LOG_DEBUG, _EVENT_LOG_MSG,
+ _EVENT_LOG_WARN, and _EVENT_LOG_ERR are still exposed in event.h: they
+ are now deprecated, but to support older code, they will need to stay
+ around for a while. New code should use EVENT_LOG_DEBUG,
+ EVENT_LOG_MSG, EVENT_LOG_WARN, and EVENT_LOG_ERR instead.
+
+4.6. Event and callback refactoring
+
+ As a simplification and optimization to Libevent's "deferred callback"
+ logic (introduced in 2.0 to avoid callback recursion), Libevent now
+ treats all of its deferrable callback types using the same logic it
+ uses for active events. Now deferred events no longer cause priority
+ inversion, no longer require special code to cancel them, and so on.
+
+ Regular events and deferred callbacks now both descend from an
+ internal light-weight event_callback supertype, and both support
+ priorities and take part in the other anti-priority-inversion
+ mechanisms in Libevent.
+
+ To avoid starvation from callback recursion (which was the reason we
+ introduced "deferred callbacks" in the first place) the implementation
+ now allows an event callback to be scheduled as "active later":
+ instead of running in the current iteration of the event loop, it runs
+ in the next one.
+
+5. Testing
+
+ Libevent's test coverage level is more or less unchanged since before:
+ we still have over 80% line coverage in our tests on Linux and OSX.
+ There are some under-tested modules, though: we need to fix those.
diff --git a/libs/libevent/include/evconfig-private.h b/libs/libevent/include/evconfig-private.h
new file mode 100644
index 0000000000..5eff5500f5
--- /dev/null
+++ b/libs/libevent/include/evconfig-private.h
@@ -0,0 +1,35 @@
+
+#ifndef EVCONFIG_PRIVATE_H_INCLUDED_
+#define EVCONFIG_PRIVATE_H_INCLUDED_
+
+/* Enable extensions on AIX 3, Interix. */
+/* #undef _ALL_SOURCE */
+
+/* Enable GNU extensions on systems that have them. */
+/* #undef _GNU_SOURCE */
+
+/* Enable threading extensions on Solaris. */
+/* #undef _POSIX_PTHREAD_SEMANTICS */
+
+/* Enable extensions on HP NonStop. */
+/* #undef _TANDEM_SOURCE */
+
+/* Enable general extensions on Solaris. */
+/* #undef __EXTENSIONS__ */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to 1 if on MINIX. */
+/* #undef _MINIX */
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+/* #undef _POSIX_1_SOURCE */
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+/* #undef _POSIX_SOURCE */
+
+#endif
diff --git a/libs/libevent/include/evdns.h b/libs/libevent/include/evdns.h
new file mode 100644
index 0000000000..8672db0369
--- /dev/null
+++ b/libs/libevent/include/evdns.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT1_EVDNS_H_INCLUDED_
+#define EVENT1_EVDNS_H_INCLUDED_
+
+/** @file evdns.h
+
+ A dns subsystem for Libevent.
+
+ The <evdns.h> header is deprecated in Libevent 2.0 and later; please
+ use <event2/evdns.h> instead. Depending on what functionality you
+ need, you may also want to include more of the other <event2/...>
+ headers.
+ */
+
+#include <event.h>
+#include <event2/dns.h>
+#include <event2/dns_compat.h>
+#include <event2/dns_struct.h>
+
+#endif /* EVENT1_EVDNS_H_INCLUDED_ */
diff --git a/libs/libevent/include/event.h b/libs/libevent/include/event.h
new file mode 100644
index 0000000000..ba5186713b
--- /dev/null
+++ b/libs/libevent/include/event.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT1_EVENT_H_INCLUDED_
+#define EVENT1_EVENT_H_INCLUDED_
+
+/** @file event.h
+
+ A library for writing event-driven network servers.
+
+ The <event.h> header is deprecated in Libevent 2.0 and later; please
+ use <event2/event.h> instead. Depending on what functionality you
+ need, you may also want to include more of the other event2/
+ headers.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdarg.h>
+
+/* For int types. */
+#include <evutil.h>
+
+#ifdef _WIN32
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <event2/event_struct.h>
+#include <event2/event.h>
+#include <event2/event_compat.h>
+#include <event2/buffer.h>
+#include <event2/buffer_compat.h>
+#include <event2/bufferevent.h>
+#include <event2/bufferevent_struct.h>
+#include <event2/bufferevent_compat.h>
+#include <event2/tag.h>
+#include <event2/tag_compat.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT1_EVENT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/buffer.h b/libs/libevent/include/event2/buffer.h
new file mode 100644
index 0000000000..468588b9f1
--- /dev/null
+++ b/libs/libevent/include/event2/buffer.h
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_BUFFER_H_INCLUDED_
+#define EVENT2_BUFFER_H_INCLUDED_
+
+/** @file event2/buffer.h
+
+ Functions for buffering data for network sending or receiving.
+
+ An evbuffer can be used for preparing data before sending it to
+ the network or conversely for reading data from the network.
+ Evbuffers try to avoid memory copies as much as possible. As a
+ result, evbuffers can be used to pass data around without actually
+ incurring the overhead of copying the data.
+
+ A new evbuffer can be allocated with evbuffer_new(), and can be
+ freed with evbuffer_free(). Most users will be using evbuffers via
+ the bufferevent interface. To access a bufferevent's evbuffers, use
+ bufferevent_get_input() and bufferevent_get_output().
+
+ There are several guidelines for using evbuffers.
+
+ - if you already know how much data you are going to add as a result
+ of calling evbuffer_add() multiple times, it makes sense to use
+ evbuffer_expand() first to make sure that enough memory is allocated
+    beforehand.
+
+ - evbuffer_add_buffer() adds the contents of one buffer to the other
+ without incurring any unnecessary memory copies.
+
+ - evbuffer_add() and evbuffer_add_buffer() do not mix very well:
+ if you use them, you will wind up with fragmented memory in your
+ buffer.
+
+ - For high-performance code, you may want to avoid copying data into and out
+ of buffers. You can skip the copy step by using
+ evbuffer_reserve_space()/evbuffer_commit_space() when writing into a
+ buffer, and evbuffer_peek() when reading.
+
+ In Libevent 2.0 and later, evbuffers are represented using a linked
+ list of memory chunks, with pointers to the first and last chunk in
+ the chain.
+
+ As the contents of an evbuffer can be stored in multiple different
+ memory blocks, it cannot be accessed directly. Instead, evbuffer_pullup()
+ can be used to force a specified number of bytes to be contiguous. This
+ will cause memory reallocation and memory copies if the data is split
+ across multiple blocks. It is more efficient, however, to use
+  evbuffer_peek() if you don't require that the memory be contiguous.
+ */
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#include <stdarg.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+#include <event2/util.h>
+
+/**
+ An evbuffer is an opaque data type for efficiently buffering data to be
+ sent or received on the network.
+
+ @see event2/event.h for more information
+*/
+struct evbuffer
+#ifdef EVENT_IN_DOXYGEN_
+{}
+#endif
+;
+
+/**
+ Pointer to a position within an evbuffer.
+
+ Used when repeatedly searching through a buffer. Calling any function
+ that modifies or re-packs the buffer contents may invalidate all
+  evbuffer_ptrs for that buffer.  Do not modify or construct these values
+ except with evbuffer_ptr_set.
+
+ An evbuffer_ptr can represent any position from the start of a buffer up
+ to a position immediately after the end of a buffer.
+
+ @see evbuffer_ptr_set()
+ */
+struct evbuffer_ptr {
+ ev_ssize_t pos;
+
+ /* Do not alter or rely on the values of fields: they are for internal
+ * use */
+ struct {
+ void *chain;
+ size_t pos_in_chain;
+ } internal_;
+};
+
+/** Describes a single extent of memory inside an evbuffer. Used for
+ direct-access functions.
+
+ @see evbuffer_reserve_space, evbuffer_commit_space, evbuffer_peek
+ */
+#ifdef EVENT__HAVE_SYS_UIO_H
+#define evbuffer_iovec iovec
+/* Internal use -- defined only if we are using the native struct iovec */
+#define EVBUFFER_IOVEC_IS_NATIVE_
+#else
+struct evbuffer_iovec {
+ /** The start of the extent of memory. */
+ void *iov_base;
+ /** The length of the extent of memory. */
+ size_t iov_len;
+};
+#endif
+
+/**
+ Allocate storage for a new evbuffer.
+
+ @return a pointer to a newly allocated evbuffer struct, or NULL if an error
+ occurred
+ */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer *evbuffer_new(void);
+/**
+ Deallocate storage for an evbuffer.
+
+ @param buf pointer to the evbuffer to be freed
+ */
+EVENT2_EXPORT_SYMBOL
+void evbuffer_free(struct evbuffer *buf);
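+
+/*
+  A minimal usage sketch (error checks trimmed for brevity); evbuffer_add()
+  and evbuffer_remove() are declared further down in this header:
+
+	struct evbuffer *buf = evbuffer_new();
+	if (buf) {
+		char out[5];
+		evbuffer_add(buf, "hello", 5);
+		evbuffer_remove(buf, out, sizeof(out));
+		evbuffer_free(buf);
+	}
+ */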
+
+/**
+ Enable locking on an evbuffer so that it can safely be used by multiple
+ threads at the same time.
+
+ NOTE: when locking is enabled, the lock will be held when callbacks are
+ invoked. This could result in deadlock if you aren't careful. Plan
+ accordingly!
+
+ @param buf An evbuffer to make lockable.
+ @param lock A lock object, or NULL if we should allocate our own.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_enable_locking(struct evbuffer *buf, void *lock);
+
+/**
+ Acquire the lock on an evbuffer. Has no effect if locking was not enabled
+ with evbuffer_enable_locking.
+*/
+EVENT2_EXPORT_SYMBOL
+void evbuffer_lock(struct evbuffer *buf);
+
+/**
+ Release the lock on an evbuffer. Has no effect if locking was not enabled
+ with evbuffer_enable_locking.
+*/
+EVENT2_EXPORT_SYMBOL
+void evbuffer_unlock(struct evbuffer *buf);
+
+
+/** If this flag is set, then we will not use evbuffer_peek(),
+ * evbuffer_remove(), evbuffer_remove_buffer(), and so on to read bytes
+ * from this buffer: we'll only take bytes out of this buffer by
+ * writing them to the network (as with evbuffer_write_atmost), by
+ * removing them without observing them (as with evbuffer_drain),
+ * or by copying them all out at once (as with evbuffer_add_buffer).
+ *
+ * Using this option allows the implementation to use sendfile-based
+ * operations for evbuffer_add_file(); see that function for more
+ * information.
+ *
+ * This flag is on by default for bufferevents that can take advantage
+ * of it; you should never actually need to set it on a bufferevent's
+ * output buffer.
+ */
+#define EVBUFFER_FLAG_DRAINS_TO_FD 1
+
+/** Change the flags that are set for an evbuffer by adding more.
+ *
+ * @param buf the evbuffer whose flags we want to change.
+ * @param flags One or more EVBUFFER_FLAG_* options to set.
+ * @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags);
+/** Change the flags that are set for an evbuffer by removing some.
+ *
+ * @param buf the evbuffer whose flags we want to change.
+ * @param flags One or more EVBUFFER_FLAG_* options to clear.
+ * @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags);
+
+/**
+ Returns the total number of bytes stored in the evbuffer
+
+ @param buf pointer to the evbuffer
+ @return the number of bytes stored in the evbuffer
+*/
+EVENT2_EXPORT_SYMBOL
+size_t evbuffer_get_length(const struct evbuffer *buf);
+
+/**
+ Returns the number of contiguous available bytes in the first buffer chain.
+
+ This is useful when processing data that might be split into multiple
+ chains, or that might all be in the first chain. Calls to
+ evbuffer_pullup() that cause reallocation and copying of data can thus be
+ avoided.
+
+ @param buf pointer to the evbuffer
+ @return 0 if no data is available, otherwise the number of available bytes
+ in the first buffer chain.
+*/
+EVENT2_EXPORT_SYMBOL
+size_t evbuffer_get_contiguous_space(const struct evbuffer *buf);
+
+/**
+ Expands the available space in an evbuffer.
+
+ Expands the available space in the evbuffer to at least datlen, so that
+ appending datlen additional bytes will not require any new allocations.
+
+ @param buf the evbuffer to be expanded
+ @param datlen the new minimum length requirement
+ @return 0 if successful, or -1 if an error occurred
+*/
+EVENT2_EXPORT_SYMBOL
+int evbuffer_expand(struct evbuffer *buf, size_t datlen);
+
+/**
+ Reserves space in the last chain or chains of an evbuffer.
+
+ Makes space available in the last chain or chains of an evbuffer that can
+ be arbitrarily written to by a user. The space does not become
+ available for reading until it has been committed with
+ evbuffer_commit_space().
+
+ The space is made available as one or more extents, represented by
+ an initial pointer and a length. You can force the memory to be
+ available as only one extent. Allowing more extents, however, makes the
+ function more efficient.
+
+ Multiple subsequent calls to this function will make the same space
+ available until evbuffer_commit_space() has been called.
+
+ It is an error to do anything that moves around the buffer's internal
+ memory structures before committing the space.
+
+ NOTE: The code currently does not ever use more than two extents.
+ This may change in future versions.
+
+ @param buf the evbuffer in which to reserve space.
+ @param size how much space to make available, at minimum. The
+ total length of the extents may be greater than the requested
+ length.
+ @param vec an array of one or more evbuffer_iovec structures to
+ hold pointers to the reserved extents of memory.
+ @param n_vec The length of the vec array. Must be at least 1;
+ 2 is more efficient.
+ @return the number of provided extents, or -1 on error.
+ @see evbuffer_commit_space()
+*/
+EVENT2_EXPORT_SYMBOL
+int
+evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
+ struct evbuffer_iovec *vec, int n_vec);
+
+/**
+ Commits previously reserved space.
+
+ Commits some of the space previously reserved with
+ evbuffer_reserve_space(). It then becomes available for reading.
+
+  This function may return an error if the pointers in the extents do
+  not match those returned from evbuffer_reserve_space, or if data
+ has been added to the buffer since the space was reserved.
+
+ If you want to commit less data than you got reserved space for,
+  modify the iov_len field of the appropriate extent to a smaller
+ value. Note that you may have received more space than you
+ requested if it was available!
+
+ @param buf the evbuffer in which to reserve space.
+ @param vec one or two extents returned by evbuffer_reserve_space.
+ @param n_vecs the number of extents.
+ @return 0 on success, -1 on error
+ @see evbuffer_reserve_space()
+*/
+EVENT2_EXPORT_SYMBOL
+int evbuffer_commit_space(struct evbuffer *buf,
+ struct evbuffer_iovec *vec, int n_vecs);
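+
+/*
+  A sketch of the reserve/commit pattern for copy-free writes, assuming
+  buf is an existing evbuffer; fill_data() is a hypothetical producer
+  function, not part of Libevent:
+
+	struct evbuffer_iovec v;
+	if (evbuffer_reserve_space(buf, 4096, &v, 1) == 1) {
+		size_t produced = fill_data(v.iov_base, v.iov_len);
+		v.iov_len = produced;
+		evbuffer_commit_space(buf, &v, 1);
+	}
+ */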
+
+/**
+ Append data to the end of an evbuffer.
+
+ @param buf the evbuffer to be appended to
+ @param data pointer to the beginning of the data buffer
+ @param datlen the number of bytes to be copied from the data buffer
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen);
+
+
+/**
+ Read data from an evbuffer and drain the bytes read.
+
+ If more bytes are requested than are available in the evbuffer, we
+ only extract as many bytes as were available.
+
+ @param buf the evbuffer to be read from
+ @param data the destination buffer to store the result
+ @param datlen the maximum size of the destination buffer
+ @return the number of bytes read, or -1 if we can't drain the buffer.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen);
+
+/**
+ Read data from an evbuffer, and leave the buffer unchanged.
+
+ If more bytes are requested than are available in the evbuffer, we
+ only extract as many bytes as were available.
+
+ @param buf the evbuffer to be read from
+ @param data_out the destination buffer to store the result
+ @param datlen the maximum size of the destination buffer
+  @return the number of bytes read, or -1 on error.
+ */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen);
+
+/**
+ Read data from the middle of an evbuffer, and leave the buffer unchanged.
+
+ If more bytes are requested than are available in the evbuffer, we
+ only extract as many bytes as were available.
+
+ @param buf the evbuffer to be read from
+ @param pos the position to start reading from
+ @param data_out the destination buffer to store the result
+ @param datlen the maximum size of the destination buffer
+  @return the number of bytes read, or -1 on error.
+ */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos, void *data_out, size_t datlen);
+
+/**
+ Read data from an evbuffer into another evbuffer, draining
+ the bytes from the source buffer. This function avoids copy
+ operations to the extent possible.
+
+ If more bytes are requested than are available in src, the src
+ buffer is drained completely.
+
+ @param src the evbuffer to be read from
+ @param dst the destination evbuffer to store the result into
+  @param datlen the maximum number of bytes to transfer
+ @return the number of bytes read
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
+ size_t datlen);
+
+/** Used to tell evbuffer_readln what kind of line-ending to look for.
+ */
+enum evbuffer_eol_style {
+ /** Any sequence of CR and LF characters is acceptable as an
+ * EOL.
+ *
+ * Note that this style can produce ambiguous results: the
+ * sequence "CRLF" will be treated as a single EOL if it is
+ * all in the buffer at once, but if you first read a CR from
+ * the network and later read an LF from the network, it will
+ * be treated as two EOLs.
+ */
+ EVBUFFER_EOL_ANY,
+ /** An EOL is an LF, optionally preceded by a CR. This style is
+ * most useful for implementing text-based internet protocols. */
+ EVBUFFER_EOL_CRLF,
+ /** An EOL is a CR followed by an LF. */
+ EVBUFFER_EOL_CRLF_STRICT,
+ /** An EOL is a LF. */
+ EVBUFFER_EOL_LF,
+ /** An EOL is a NUL character (that is, a single byte with value 0) */
+ EVBUFFER_EOL_NUL
+};
+
+/**
+ * Read a single line from an evbuffer.
+ *
+ * Reads a line terminated by an EOL as determined by the evbuffer_eol_style
+ * argument. Returns a newly allocated nul-terminated string; the caller must
+ * free the returned value. The EOL is not included in the returned string.
+ *
+ * @param buffer the evbuffer to read from
+ * @param n_read_out if non-NULL, points to a size_t that is set to the
+ * number of characters in the returned string. This is useful for
+ * strings that can contain NUL characters.
+ * @param eol_style the style of line-ending to use.
+ * @return pointer to a single line, or NULL if an error occurred
+ */
+EVENT2_EXPORT_SYMBOL
+char *evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
+ enum evbuffer_eol_style eol_style);
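+
+/*
+  A sketch of draining complete lines from an existing evbuffer buf;
+  handle_line() is a hypothetical application function:
+
+	char *line;
+	size_t len;
+	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
+		handle_line(line, len);
+		free(line);
+	}
+ */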
+
+/**
+ Move all data from one evbuffer into another evbuffer.
+
+ This is a destructive add. The data from one buffer moves into
+ the other buffer. However, no unnecessary memory copies occur.
+
+ @param outbuf the output buffer
+ @param inbuf the input buffer
+ @return 0 if successful, or -1 if an error occurred
+
+ @see evbuffer_remove_buffer()
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf);
+
+/**
+ Copy data from one evbuffer into another evbuffer.
+
+ This is a non-destructive add. The data from one buffer is copied
+ into the other buffer. However, no unnecessary memory copies occur.
+
+ Note that buffers already containing buffer references can't be added
+ to other buffers.
+
+ @param outbuf the output buffer
+ @param inbuf the input buffer
+ @return 0 if successful, or -1 if an error occurred
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_buffer_reference(struct evbuffer *outbuf,
+ struct evbuffer *inbuf);
+
+/**
+ A cleanup function for a piece of memory added to an evbuffer by
+ reference.
+
+ @see evbuffer_add_reference()
+ */
+typedef void (*evbuffer_ref_cleanup_cb)(const void *data,
+ size_t datalen, void *extra);
+
+/**
+ Reference memory into an evbuffer without copying.
+
+ The memory needs to remain valid until all the added data has been
+ read. This function keeps just a reference to the memory without
+ actually incurring the overhead of a copy.
+
+ @param outbuf the output buffer
+ @param data the memory to reference
+  @param datlen how much memory to reference
+ @param cleanupfn callback to be invoked when the memory is no longer
+ referenced by this evbuffer.
+ @param cleanupfn_arg optional argument to the cleanup callback
+ @return 0 if successful, or -1 if an error occurred
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_reference(struct evbuffer *outbuf,
+ const void *data, size_t datlen,
+ evbuffer_ref_cleanup_cb cleanupfn, void *cleanupfn_arg);
+
+/**
+ Copy data from a file into the evbuffer for writing to a socket.
+
+ This function avoids unnecessary data copies between userland and
+ kernel. If sendfile is available and the EVBUFFER_FLAG_DRAINS_TO_FD
+ flag is set, it uses those functions. Otherwise, it tries to use
+ mmap (or CreateFileMapping on Windows).
+
+ The function owns the resulting file descriptor and will close it
+ when finished transferring data.
+
+ The results of using evbuffer_remove() or evbuffer_pullup() on
+ evbuffers whose data was added using this function are undefined.
+
+ For more fine-grained control, use evbuffer_add_file_segment.
+
+ @param outbuf the output buffer
+ @param fd the file descriptor
+ @param offset the offset from which to read data
+ @param length how much data to read, or -1 to read as much as possible.
+ (-1 requires that 'fd' support fstat.)
+ @return 0 if successful, or -1 if an error occurred
+*/
+
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_file(struct evbuffer *outbuf, int fd, ev_off_t offset,
+ ev_off_t length);
+
+/**
+ An evbuffer_file_segment holds a reference to a range of a file --
+ possibly the whole file! -- for use in writing from an evbuffer to a
+ socket. It could be implemented with mmap, sendfile, splice, or (if all
+ else fails) by just pulling all the data into RAM. A single
+ evbuffer_file_segment can be added more than once, and to more than one
+ evbuffer.
+ */
+struct evbuffer_file_segment;
+
+/**
+ Flag for creating evbuffer_file_segment: If this flag is set, then when
+ the evbuffer_file_segment is freed and no longer in use by any
+ evbuffer, the underlying fd is closed.
+ */
+#define EVBUF_FS_CLOSE_ON_FREE 0x01
+/**
+ Flag for creating evbuffer_file_segment: Disable memory-map based
+ implementations.
+ */
+#define EVBUF_FS_DISABLE_MMAP 0x02
+/**
+ Flag for creating evbuffer_file_segment: Disable direct fd-to-fd
+ implementations (including sendfile and splice).
+
+ You might want to use this option if data needs to be taken from the
+ evbuffer by any means other than writing it to the network: the sendfile
+ backend is fast, but it only works for sending files directly to the
+ network.
+ */
+#define EVBUF_FS_DISABLE_SENDFILE 0x04
+/**
+ Flag for creating evbuffer_file_segment: Do not allocate a lock for this
+ segment. If this option is set, then neither the segment nor any
+ evbuffer it is added to may ever be accessed from more than one thread
+ at a time.
+ */
+#define EVBUF_FS_DISABLE_LOCKING 0x08
+
+/**
+  A cleanup function for an evbuffer_file_segment added to an evbuffer
+ for reference.
+ */
+typedef void (*evbuffer_file_segment_cleanup_cb)(
+ struct evbuffer_file_segment const* seg, int flags, void* arg);
+
+/**
+ Create and return a new evbuffer_file_segment for reading data from a
+ file and sending it out via an evbuffer.
+
+ This function avoids unnecessary data copies between userland and
+ kernel. Where available, it uses sendfile or splice.
+
+ The file descriptor must not be closed so long as any evbuffer is using
+ this segment.
+
+ The results of using evbuffer_remove() or evbuffer_pullup() or any other
+ function that reads bytes from an evbuffer on any evbuffer containing
+ the newly returned segment are undefined, unless you pass the
+ EVBUF_FS_DISABLE_SENDFILE flag to this function.
+
+ @param fd an open file to read from.
+ @param offset an index within the file at which to start reading
+ @param length how much data to read, or -1 to read as much as possible.
+ (-1 requires that 'fd' support fstat.)
+ @param flags any number of the EVBUF_FS_* flags
+ @return a new evbuffer_file_segment, or NULL on failure.
+ **/
+EVENT2_EXPORT_SYMBOL
+struct evbuffer_file_segment *evbuffer_file_segment_new(
+ int fd, ev_off_t offset, ev_off_t length, unsigned flags);
+
+/**
+ Free an evbuffer_file_segment
+
+ It is safe to call this function even if the segment has been added to
+ one or more evbuffers. The evbuffer_file_segment will not be freed
+ until no more references to it exist.
+ */
+EVENT2_EXPORT_SYMBOL
+void evbuffer_file_segment_free(struct evbuffer_file_segment *seg);
+
+/**
+ Add cleanup callback and argument for the callback to an
+ evbuffer_file_segment.
+
+ The cleanup callback will be invoked when no more references to the
+ evbuffer_file_segment exist.
+ **/
+EVENT2_EXPORT_SYMBOL
+void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
+ evbuffer_file_segment_cleanup_cb cb, void* arg);
+
+/**
+ Insert some or all of an evbuffer_file_segment at the end of an evbuffer
+
+ Note that the offset and length parameters of this function have a
+ different meaning from those provided to evbuffer_file_segment_new: When
+ you create the segment, the offset is the offset _within the file_, and
+ the length is the length _of the segment_, whereas when you add a
+ segment to an evbuffer, the offset is _within the segment_ and the
+  length is the length of the _part of the segment_ that you want to use.
+
+ In other words, if you have a 10 KiB file, and you create an
+ evbuffer_file_segment for it with offset 20 and length 1000, it will
+ refer to bytes 20..1019 inclusive. If you then pass this segment to
+ evbuffer_add_file_segment and specify an offset of 20 and a length of
+  50, you will be adding bytes 40..89 inclusive.
+
+ @param buf the evbuffer to append to
+ @param seg the segment to add
+ @param offset the offset within the segment to start from
+ @param length the amount of data to add, or -1 to add it all.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_file_segment(struct evbuffer *buf,
+ struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length);
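+
+/*
+  A sketch of reusing one segment for two writes of the same file.  Here
+  out is assumed to be an existing evbuffer and fd an open file
+  descriptor; with EVBUF_FS_CLOSE_ON_FREE the segment takes ownership of
+  fd:
+
+	struct evbuffer_file_segment *seg =
+	    evbuffer_file_segment_new(fd, 0, -1, EVBUF_FS_CLOSE_ON_FREE);
+	if (seg) {
+		evbuffer_add_file_segment(out, seg, 0, -1);
+		evbuffer_add_file_segment(out, seg, 0, -1);
+		evbuffer_file_segment_free(seg);
+	}
+ */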
+
+/**
+ Append a formatted string to the end of an evbuffer.
+
+  The string is formatted as with printf.
+
+ @param buf the evbuffer that will be appended to
+ @param fmt a format string
+ @param ... arguments that will be passed to printf(3)
+ @return The number of bytes added if successful, or -1 if an error occurred.
+
+ @see evutil_printf(), evbuffer_add_vprintf()
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
+#ifdef __GNUC__
+ __attribute__((format(printf, 2, 3)))
+#endif
+;
+
+/**
+ Append a va_list formatted string to the end of an evbuffer.
+
+ @param buf the evbuffer that will be appended to
+ @param fmt a format string
+ @param ap a varargs va_list argument array that will be passed to vprintf(3)
+ @return The number of bytes added if successful, or -1 if an error occurred.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
+#ifdef __GNUC__
+ __attribute__((format(printf, 2, 0)))
+#endif
+;
+
+
+/**
+  Remove a specified number of bytes of data from the beginning of an evbuffer.
+
+ @param buf the evbuffer to be drained
+ @param len the number of bytes to drain from the beginning of the buffer
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_drain(struct evbuffer *buf, size_t len);
+
+
+/**
+ Write the contents of an evbuffer to a file descriptor.
+
+ The evbuffer will be drained after the bytes have been successfully written.
+
+ @param buffer the evbuffer to be written and drained
+ @param fd the file descriptor to be written to
+ @return the number of bytes written, or -1 if an error occurred
+ @see evbuffer_read()
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd);
+
+/**
+ Write some of the contents of an evbuffer to a file descriptor.
+
+ The evbuffer will be drained after the bytes have been successfully written.
+
+ @param buffer the evbuffer to be written and drained
+ @param fd the file descriptor to be written to
+ @param howmuch the largest allowable number of bytes to write, or -1
+ to write as many bytes as we can.
+ @return the number of bytes written, or -1 if an error occurred
+ @see evbuffer_read()
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
+ ev_ssize_t howmuch);
+
+/**
+ Read from a file descriptor and store the result in an evbuffer.
+
+ @param buffer the evbuffer to store the result
+ @param fd the file descriptor to read from
+ @param howmuch the number of bytes to be read
+ @return the number of bytes read, or -1 if an error occurred
+ @see evbuffer_write()
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_read(struct evbuffer *buffer, evutil_socket_t fd, int howmuch);
+
+/**
+ Search for a string within an evbuffer.
+
+ @param buffer the evbuffer to be searched
+ @param what the string to be searched for
+ @param len the length of the search string
+ @param start NULL or a pointer to a valid struct evbuffer_ptr.
+ @return a struct evbuffer_ptr whose 'pos' field has the offset of the
+ first occurrence of the string in the buffer after 'start'. The 'pos'
+ field of the result is -1 if the string was not found.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer_ptr evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start);
+
+/**
+ Search for a string within part of an evbuffer.
+
+ @param buffer the evbuffer to be searched
+ @param what the string to be searched for
+ @param len the length of the search string
+ @param start NULL or a pointer to a valid struct evbuffer_ptr that
+ indicates where we should start searching.
+ @param end NULL or a pointer to a valid struct evbuffer_ptr that
+ indicates where we should stop searching.
+ @return a struct evbuffer_ptr whose 'pos' field has the offset of the
+ first occurrence of the string in the buffer after 'start'. The 'pos'
+ field of the result is -1 if the string was not found.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer_ptr evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end);
+
+/**
+ Defines how to adjust an evbuffer_ptr by evbuffer_ptr_set()
+
+ @see evbuffer_ptr_set() */
+enum evbuffer_ptr_how {
+	/** Sets the pointer to the position; can be called with an
+ uninitialized evbuffer_ptr. */
+ EVBUFFER_PTR_SET,
+ /** Advances the pointer by adding to the current position. */
+ EVBUFFER_PTR_ADD
+};
+
+/**
+ Sets the search pointer in the buffer to position.
+
+ There are two ways to use this function: you can call
+ evbuffer_ptr_set(buf, &pos, N, EVBUFFER_PTR_SET)
+ to move 'pos' to a position 'N' bytes after the start of the buffer, or
+ evbuffer_ptr_set(buf, &pos, N, EVBUFFER_PTR_ADD)
+ to move 'pos' forward by 'N' bytes.
+
+ If evbuffer_ptr is not initialized, this function can only be called
+ with EVBUFFER_PTR_SET.
+
+ An evbuffer_ptr can represent any position from the start of the buffer to
+ a position immediately after the end of the buffer.
+
+  @param buffer the evbuffer to be searched
+ @param ptr a pointer to a struct evbuffer_ptr
+ @param position the position at which to start the next search
+ @param how determines how the pointer should be manipulated.
+ @returns 0 on success or -1 otherwise
+*/
+EVENT2_EXPORT_SYMBOL
+int
+evbuffer_ptr_set(struct evbuffer *buffer, struct evbuffer_ptr *ptr,
+ size_t position, enum evbuffer_ptr_how how);
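+
+/*
+  A sketch of advancing the search pointer past each match to count
+  occurrences of a two-byte string in an existing evbuffer buf:
+
+	struct evbuffer_ptr p;
+	int count = 0;
+	evbuffer_ptr_set(buf, &p, 0, EVBUFFER_PTR_SET);
+	for (;;) {
+		p = evbuffer_search(buf, "\r\n", 2, &p);
+		if (p.pos == -1)
+			break;
+		count++;
+		if (evbuffer_ptr_set(buf, &p, 2, EVBUFFER_PTR_ADD) < 0)
+			break;
+	}
+ */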
+
+/**
+ Search for an end-of-line string within an evbuffer.
+
+ @param buffer the evbuffer to be searched
+ @param start NULL or a pointer to a valid struct evbuffer_ptr to start
+ searching at.
+ @param eol_len_out If non-NULL, the pointed-to value will be set to
+ the length of the end-of-line string.
+ @param eol_style The kind of EOL to look for; see evbuffer_readln() for
+ more information
+ @return a struct evbuffer_ptr whose 'pos' field has the offset of the
+  first occurrence of the EOL in the buffer after 'start'. The 'pos'
+ field of the result is -1 if the string was not found.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer_ptr evbuffer_search_eol(struct evbuffer *buffer,
+ struct evbuffer_ptr *start, size_t *eol_len_out,
+ enum evbuffer_eol_style eol_style);
+
+/** Function to peek at data inside an evbuffer without removing it or
+ copying it out.
+
+ Pointers to the data are returned by filling the 'vec_out' array
+ with pointers to one or more extents of data inside the buffer.
+
+ The total data in the extents that you get back may be more than
+  you requested (if there is more data in the last extent than you asked
+ for), or less (if you do not provide enough evbuffer_iovecs, or if
+ the buffer does not have as much data as you asked to see).
+
+  @param buffer the evbuffer to peek into.
+ @param len the number of bytes to try to peek. If len is negative, we
+ will try to fill as much of vec_out as we can. If len is negative
+ and vec_out is not provided, we return the number of evbuffer_iovecs
+ that would be needed to get all the data in the buffer.
+ @param start_at an evbuffer_ptr indicating the point at which we
+ should start looking for data. NULL means, "At the start of the
+ buffer."
+ @param vec_out an array of evbuffer_iovec
+ @param n_vec the length of vec_out. If 0, we only count how many
+ extents would be necessary to point to the requested amount of
+ data.
+ @return The number of extents needed. This may be less than n_vec
+ if we didn't need all the evbuffer_iovecs we were given, or more
+ than n_vec if we would need more to return all the data that was
+ requested.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
+ struct evbuffer_ptr *start_at,
+ struct evbuffer_iovec *vec_out, int n_vec);
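+
+/*
+  A sketch of inspecting up to the first 512 bytes of an existing evbuffer
+  buf without copying them out; inspect() is a hypothetical consumer
+  function:
+
+	struct evbuffer_iovec vec[4];
+	int i, n = evbuffer_peek(buf, 512, NULL, vec, 4);
+	if (n > 4)
+		n = 4;
+	for (i = 0; i < n; ++i)
+		inspect(vec[i].iov_base, vec[i].iov_len);
+ */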
+
+
+/** Structure passed to an evbuffer_cb_func evbuffer callback
+
+ @see evbuffer_cb_func, evbuffer_add_cb()
+ */
+struct evbuffer_cb_info {
+ /** The number of bytes in this evbuffer when callbacks were last
+ * invoked. */
+ size_t orig_size;
+ /** The number of bytes added since callbacks were last invoked. */
+ size_t n_added;
+ /** The number of bytes removed since callbacks were last invoked. */
+ size_t n_deleted;
+};
+
+/** Type definition for a callback that is invoked whenever data is added or
+ removed from an evbuffer.
+
+ An evbuffer may have one or more callbacks set at a time. The order
+ in which they are executed is undefined.
+
+ A callback function may add more callbacks, or remove itself from the
+ list of callbacks, or add or remove data from the buffer. It may not
+ remove another callback from the list.
+
+ If a callback adds or removes data from the buffer or from another
+ buffer, this can cause a recursive invocation of your callback or
+ other callbacks. If you ask for an infinite loop, you might just get
+ one: watch out!
+
+ @param buffer the buffer whose size has changed
+ @param info a structure describing how the buffer changed.
+ @param arg a pointer to user data
+*/
+typedef void (*evbuffer_cb_func)(struct evbuffer *buffer, const struct evbuffer_cb_info *info, void *arg);
+
+struct evbuffer_cb_entry;
+/** Add a new callback to an evbuffer.
+
+ Subsequent calls to evbuffer_add_cb() add new callbacks. To remove this
+ callback, call evbuffer_remove_cb or evbuffer_remove_cb_entry.
+
+ @param buffer the evbuffer to be monitored
+ @param cb the callback function to invoke when the evbuffer is modified,
+ or NULL to remove all callbacks.
+ @param cbarg an argument to be provided to the callback function
+ @return a handle to the callback on success, or NULL on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer_cb_entry *evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg);
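+
+/*
+  A sketch of a callback that tallies how many bytes have ever been added
+  to an existing evbuffer buf:
+
+	static void count_added(struct evbuffer *b,
+	    const struct evbuffer_cb_info *info, void *arg)
+	{
+		size_t *total = arg;
+		*total += info->n_added;
+	}
+
+  ...then, once the buffer exists:
+
+	static size_t total;
+	struct evbuffer_cb_entry *ent = evbuffer_add_cb(buf, count_added, &total);
+ */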
+
+/** Remove a callback from an evbuffer, given a handle returned from
+ evbuffer_add_cb.
+
+ Calling this function invalidates the handle.
+
+ @return 0 if a callback was removed, or -1 if no matching callback was
+ found.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_remove_cb_entry(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *ent);
+
+/** Remove a callback from an evbuffer, given the function and argument
+ used to add it.
+
+ @return 0 if a callback was removed, or -1 if no matching callback was
+ found.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg);
+
+/** If this flag is not set, then a callback is temporarily disabled, and
+ * should not be invoked.
+ *
+ * @see evbuffer_cb_set_flags(), evbuffer_cb_clear_flags()
+ */
+#define EVBUFFER_CB_ENABLED 1
+
+/** Change the flags that are set for a callback on a buffer by adding more.
+
+ @param buffer the evbuffer that the callback is watching.
+ @param cb the callback whose status we want to change.
+ @param flags EVBUFFER_CB_ENABLED to re-enable the callback.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_cb_set_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags);
+
+/** Change the flags that are set for a callback on a buffer by removing some
+
+ @param buffer the evbuffer that the callback is watching.
+ @param cb the callback whose status we want to change.
+ @param flags EVBUFFER_CB_ENABLED to disable the callback.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_cb_clear_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags);
+
+#if 0
+/** Postpone calling a given callback until unsuspend is called later.
+
+ This is different from disabling the callback, since the callback will get
+ invoked later if the buffer size changes between now and when we unsuspend
+ it.
+
+ @param the buffer that the callback is watching.
+ @param cb the callback we want to suspend.
+ */
+EVENT2_EXPORT_SYMBOL
+void evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb);
+/** Stop postponing a callback that we postponed with evbuffer_cb_suspend.
+
+ If data was added to or removed from the buffer while the callback was
+ suspended, the callback will get called once now.
+
+ @param the buffer that the callback is watching.
+ @param cb the callback we want to stop suspending.
+ */
+EVENT2_EXPORT_SYMBOL
+void evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb);
+#endif
+
+/**
+ Makes the data at the beginning of an evbuffer contiguous.
+
+ @param buf the evbuffer to make contiguous
+ @param size the number of bytes to make contiguous, or -1 to make the
+ entire buffer contiguous.
+ @return a pointer to the contiguous memory array, or NULL if param size
+ requested more data than is present in the buffer.
+*/
+
+EVENT2_EXPORT_SYMBOL
+unsigned char *evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size);
+
+/**
+ Prepends data to the beginning of the evbuffer
+
+ @param buf the evbuffer to which to prepend data
+ @param data a pointer to the memory to prepend
+ @param size the number of bytes to prepend
+ @return 0 if successful, or -1 otherwise
+*/
+
+EVENT2_EXPORT_SYMBOL
+int evbuffer_prepend(struct evbuffer *buf, const void *data, size_t size);
+
+/**
+ Prepends all data from the src evbuffer to the beginning of the dst
+ evbuffer.
+
+ @param dst the evbuffer to which to prepend data
+ @param src the evbuffer to prepend; it will be emptied as a result
+ @return 0 if successful, or -1 otherwise
+*/
+EVENT2_EXPORT_SYMBOL
+int evbuffer_prepend_buffer(struct evbuffer *dst, struct evbuffer* src);
+
+/**
+  Prevent calls that modify an evbuffer from succeeding. A buffer may be
+  frozen at the front, at the back, or at both the front and the back.
+
+ If the front of a buffer is frozen, operations that drain data from
+ the front of the buffer, or that prepend data to the buffer, will
+  fail until it is unfrozen. If the back of a buffer is frozen, operations
+  that append data to the buffer will fail until it is unfrozen.
+
+ @param buf The buffer to freeze
+ @param at_front If true, we freeze the front of the buffer. If false,
+ we freeze the back.
+ @return 0 on success, -1 on failure.
+*/
+EVENT2_EXPORT_SYMBOL
+int evbuffer_freeze(struct evbuffer *buf, int at_front);
+/**
+ Re-enable calls that modify an evbuffer.
+
+ @param buf The buffer to un-freeze
+ @param at_front If true, we unfreeze the front of the buffer. If false,
+ we unfreeze the back.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_unfreeze(struct evbuffer *buf, int at_front);
+
+struct event_base;
+/**
+ Force all the callbacks on an evbuffer to be run, not immediately after
+ the evbuffer is altered, but instead from inside the event loop.
+
+ This can be used to serialize all the callbacks to a single thread
+ of execution.
+ */
+EVENT2_EXPORT_SYMBOL
+int evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base);
+
+/**
+  Append data from one or more iovecs to an evbuffer.
+
+  Calculates the number of bytes needed for the iovec structures and
+  guarantees that all the data will fit into a single chain. Can be used
+  instead of a long series of evbuffer_add() calls to improve
+  performance.
+
+ @param buffer the destination buffer
+ @param vec the source iovec
+ @param n_vec the number of iovec structures.
+ @return the number of bytes successfully written to the output buffer.
+*/
+EVENT2_EXPORT_SYMBOL
+size_t evbuffer_add_iovec(struct evbuffer * buffer, struct evbuffer_iovec * vec, int n_vec);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_BUFFER_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/buffer_compat.h b/libs/libevent/include/event2/buffer_compat.h
new file mode 100644
index 0000000000..24f828c210
--- /dev/null
+++ b/libs/libevent/include/event2/buffer_compat.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EVENT2_BUFFER_COMPAT_H_INCLUDED_
+#define EVENT2_BUFFER_COMPAT_H_INCLUDED_
+
+#include <event2/visibility.h>
+
+/** @file event2/buffer_compat.h
+
+ Obsolete and deprecated versions of the functions in buffer.h: provided
+ only for backward compatibility.
+ */
+
+
+/**
+ Obsolete alias for evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY).
+
+ @deprecated This function is deprecated because its behavior is not correct
+ for almost any protocol, and also because it's wholly subsumed by
+ evbuffer_readln().
+
+ @param buffer the evbuffer to read from
+ @return pointer to a single line, or NULL if an error occurred
+
+*/
+EVENT2_EXPORT_SYMBOL
+char *evbuffer_readline(struct evbuffer *buffer);
+
+/** Type definition for a callback that is invoked whenever data is added or
+ removed from an evbuffer.
+
+ An evbuffer may have one or more callbacks set at a time. The order
+ in which they are executed is undefined.
+
+ A callback function may add more callbacks, or remove itself from the
+ list of callbacks, or add or remove data from the buffer. It may not
+ remove another callback from the list.
+
+ If a callback adds or removes data from the buffer or from another
+ buffer, this can cause a recursive invocation of your callback or
+ other callbacks. If you ask for an infinite loop, you might just get
+ one: watch out!
+
+ @param buffer the buffer whose size has changed
+ @param old_len the previous length of the buffer
+ @param new_len the current length of the buffer
+ @param arg a pointer to user data
+*/
+typedef void (*evbuffer_cb)(struct evbuffer *buffer, size_t old_len, size_t new_len, void *arg);
+
+/**
+ Replace all callbacks on an evbuffer with a single new callback, or
+ remove them.
+
+ Subsequent calls to evbuffer_setcb() replace callbacks set by previous
+ calls. Setting the callback to NULL removes any previously set callback.
+
+ @deprecated This function is deprecated because it clears all previous
+ callbacks set on the evbuffer, which can cause confusing behavior if
+ multiple parts of the code all want to add their own callbacks on a
+  buffer. Instead, use evbuffer_add_cb(), evbuffer_remove_cb(), and
+  evbuffer_cb_set_flags() to manage your own evbuffer callbacks without
+ interfering with callbacks set by others.
+
+ @param buffer the evbuffer to be monitored
+ @param cb the callback function to invoke when the evbuffer is modified,
+ or NULL to remove all callbacks.
+ @param cbarg an argument to be provided to the callback function
+ */
+EVENT2_EXPORT_SYMBOL
+void evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg);
+
+
+/**
+ Find a string within an evbuffer.
+
+ @param buffer the evbuffer to be searched
+ @param what the string to be searched for
+ @param len the length of the search string
+ @return a pointer to the beginning of the search string, or NULL if the search failed.
+ */
+EVENT2_EXPORT_SYMBOL
+unsigned char *evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len);
+
+/** deprecated in favor of calling the functions directly */
+#define EVBUFFER_LENGTH(x) evbuffer_get_length(x)
+/** deprecated in favor of calling the functions directly */
+#define EVBUFFER_DATA(x) evbuffer_pullup((x), -1)
+
+#endif
+
diff --git a/libs/libevent/include/event2/bufferevent.h b/libs/libevent/include/event2/bufferevent.h
new file mode 100644
index 0000000000..825918e3a4
--- /dev/null
+++ b/libs/libevent/include/event2/bufferevent.h
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_BUFFEREVENT_H_INCLUDED_
+#define EVENT2_BUFFEREVENT_H_INCLUDED_
+
+/**
+ @file event2/bufferevent.h
+
+ Functions for buffering data for network sending or receiving. Bufferevents
+ are higher level than evbuffers: each has an underlying evbuffer for reading
+ and one for writing, and callbacks that are invoked under certain
+ circumstances.
+
+ A bufferevent provides input and output buffers that get filled and
+ drained automatically. The user of a bufferevent no longer deals
+ directly with the I/O, but instead is reading from input and writing
+ to output buffers.
+
+ Once initialized, the bufferevent structure can be used repeatedly
+ with bufferevent_enable() and bufferevent_disable().
+
+ When reading is enabled, the bufferevent will try to read from the
+ file descriptor onto its input buffer, and call the read callback.
+ When writing is enabled, the bufferevent will try to write data onto its
+ file descriptor when the output buffer has enough data, and call the write
+ callback when the output buffer is sufficiently drained.
+
+ Bufferevents come in several flavors, including:
+
+ <dl>
+ <dt>Socket-based bufferevents</dt>
+ <dd>A bufferevent that reads and writes data onto a network
+ socket. Created with bufferevent_socket_new().</dd>
+
+ <dt>Paired bufferevents</dt>
+ <dd>A pair of bufferevents that send and receive data to one
+ another without touching the network. Created with
+ bufferevent_pair_new().</dd>
+
+ <dt>Filtering bufferevents</dt>
+ <dd>A bufferevent that transforms data, and sends or receives it
+ over another underlying bufferevent. Created with
+ bufferevent_filter_new().</dd>
+
+ <dt>SSL-backed bufferevents</dt>
+ <dd>A bufferevent that uses the openssl library to send and
+ receive data over an encrypted connection. Created with
+ bufferevent_openssl_socket_new() or
+ bufferevent_openssl_filter_new().</dd>
+ </dl>
+ */
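+
+/*
+  Illustrative quick-start (a sketch, not part of the upstream header).  It
+  assumes an already-created "struct event_base *base" and hypothetical
+  application-defined callbacks "read_cb" and "event_cb":
+
+      struct bufferevent *bev =
+          bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+      bufferevent_setcb(bev, read_cb, NULL, event_cb, NULL);
+      bufferevent_enable(bev, EV_READ | EV_WRITE);
+      bufferevent_socket_connect_hostname(bev, NULL, AF_UNSPEC,
+          "www.example.com", 80);
+
+  Passing a NULL evdns_base above means the hostname is resolved with a
+  blocking lookup; see bufferevent_socket_connect_hostname() below.
+ */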
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/** @name Bufferevent event codes
+
+ These flags are passed as arguments to a bufferevent's event callback.
+
+ @{
+*/
+#define BEV_EVENT_READING 0x01 /**< error encountered while reading */
+#define BEV_EVENT_WRITING 0x02 /**< error encountered while writing */
+#define BEV_EVENT_EOF 0x10 /**< end of file reached */
+#define BEV_EVENT_ERROR 0x20 /**< unrecoverable error encountered */
+#define BEV_EVENT_TIMEOUT 0x40 /**< user-specified timeout reached */
+#define BEV_EVENT_CONNECTED 0x80 /**< connect operation finished. */
+/**@}*/
+
+/**
+ An opaque type for handling buffered IO
+
+ @see event2/bufferevent.h
+ */
+struct bufferevent
+#ifdef EVENT_IN_DOXYGEN_
+{}
+#endif
+;
+struct event_base;
+struct evbuffer;
+struct sockaddr;
+
+/**
+ A read or write callback for a bufferevent.
+
+ The read callback is triggered when new data arrives in the input
+ buffer and the amount of readable data exceeds the low watermark,
+ which is 0 by default.
+
+ The write callback is triggered if the write buffer has been
+ exhausted or has fallen below its low watermark.
+
+ @param bev the bufferevent that triggered the callback
+ @param ctx the user-specified context for this bufferevent
+ */
+typedef void (*bufferevent_data_cb)(struct bufferevent *bev, void *ctx);
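+
+/*
+  For illustration only (not from the upstream header): a minimal read
+  callback that drains whatever has arrived on the input buffer.
+
+      static void read_cb(struct bufferevent *bev, void *ctx)
+      {
+          char buf[4096];
+          size_t n;
+          while ((n = bufferevent_read(bev, buf, sizeof(buf))) > 0) {
+              // process the n bytes now sitting in buf
+          }
+      }
+ */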
+
+/**
+ An event/error callback for a bufferevent.
+
+ The event callback is triggered if either an EOF condition or another
+ unrecoverable error was encountered.
+
+ For bufferevents with deferred callbacks, this is a bitwise OR of all errors
+ that have happened on the bufferevent since the last callback invocation.
+
+ @param bev the bufferevent for which the error condition was reached
+ @param what a combination of flags: BEV_EVENT_READING or BEV_EVENT_WRITING
+ to indicate if the error was encountered on the read or write path,
+ and one of the following flags: BEV_EVENT_EOF, BEV_EVENT_ERROR,
+ BEV_EVENT_TIMEOUT, BEV_EVENT_CONNECTED.
+
+ @param ctx the user-specified context for this bufferevent
+*/
+typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx);
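+
+/*
+  Illustrative event callback (hypothetical, not from the upstream header)
+  showing how the BEV_EVENT_* flags are typically inspected:
+
+      static void event_cb(struct bufferevent *bev, short what, void *ctx)
+      {
+          if (what & BEV_EVENT_CONNECTED) {
+              // connect finished; it is now safe to start writing
+          } else if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
+              bufferevent_free(bev);
+          }
+      }
+ */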
+
+/** Options that can be specified when creating a bufferevent */
+enum bufferevent_options {
+ /** If set, we close the underlying file
+ * descriptor/bufferevent/whatever when this bufferevent is freed. */
+ BEV_OPT_CLOSE_ON_FREE = (1<<0),
+
+ /** If set, and threading is enabled, operations on this bufferevent
+ * are protected by a lock */
+ BEV_OPT_THREADSAFE = (1<<1),
+
+ /** If set, callbacks are run deferred in the event loop. */
+ BEV_OPT_DEFER_CALLBACKS = (1<<2),
+
+ /** If set, callbacks are executed without locks being held on the
+ * bufferevent. This option currently requires that
+ * BEV_OPT_DEFER_CALLBACKS also be set; a future version of Libevent
+ * might remove the requirement.*/
+ BEV_OPT_UNLOCK_CALLBACKS = (1<<3)
+};
+
+/**
+ Create a new socket bufferevent over an existing socket.
+
+ @param base the event base to associate with the new bufferevent.
+ @param fd the file descriptor from which data is read and to which it is written.
+ This file descriptor is not allowed to be a pipe(2).
+ It is safe to set the fd to -1, so long as you later
+ set it with bufferevent_setfd() or bufferevent_socket_connect().
+ @param options Zero or more BEV_OPT_* flags
+ @return a pointer to a newly allocated bufferevent struct, or NULL if an
+ error occurred
+ @see bufferevent_free()
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *bufferevent_socket_new(struct event_base *base, evutil_socket_t fd, int options);
+
+/**
+ Launch a connect() attempt with a socket-based bufferevent.
+
+ When the connect succeeds, the eventcb will be invoked with
+ BEV_EVENT_CONNECTED set.
+
+ If the bufferevent does not already have a socket set, we allocate a new
+ socket here and make it nonblocking before we begin.
+
+ If no address is provided, we assume that the socket is already connecting,
+ and configure the bufferevent so that a BEV_EVENT_CONNECTED event will be
+ yielded when it is done connecting.
+
+ @param bufev an existing bufferevent allocated with
+ bufferevent_socket_new().
+ @param addr the address we should connect to
+ @param socklen The length of the address
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_socket_connect(struct bufferevent *, const struct sockaddr *, int);
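+
+/*
+  Hedged example (not part of the upstream header): connect an existing
+  socket bufferevent "bev" to 127.0.0.1:8080.  Assumes <string.h> and the
+  platform socket headers for sockaddr_in, htonl and htons.
+
+      struct sockaddr_in sin;
+      memset(&sin, 0, sizeof(sin));
+      sin.sin_family = AF_INET;
+      sin.sin_addr.s_addr = htonl(0x7f000001); // 127.0.0.1
+      sin.sin_port = htons(8080);
+      if (bufferevent_socket_connect(bev,
+          (struct sockaddr *)&sin, sizeof(sin)) < 0)
+          bufferevent_free(bev);
+ */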
+
+struct evdns_base;
+/**
+ Resolve the hostname 'hostname' and connect to it as with
+ bufferevent_socket_connect().
+
+ @param bufev An existing bufferevent allocated with bufferevent_socket_new()
+ @param evdns_base Optionally, an evdns_base to use for resolving hostnames
+ asynchronously. May be set to NULL for a blocking resolve.
+ @param family A preferred address family to resolve addresses to, or
+ AF_UNSPEC for no preference. Only AF_INET, AF_INET6, and AF_UNSPEC are
+ supported.
+ @param hostname The hostname to resolve; see below for notes on recognized
+ formats
+ @param port The port to connect to on the resolved address.
+ @return 0 if successful, -1 on failure.
+
+ Recognized hostname formats are:
+
+ www.example.com (hostname)
+ 1.2.3.4 (ipv4address)
+ ::1 (ipv6address)
+ [::1] ([ipv6address])
+
+ Performance note: If you do not provide an evdns_base, this function
+ may block while it waits for a DNS response. This is probably not
+ what you want.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_socket_connect_hostname(struct bufferevent *,
+ struct evdns_base *, int, const char *, int);
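+
+/*
+  Sketch (illustrative, not from the upstream header): a fully non-blocking
+  resolve-and-connect for an existing socket bufferevent "bev", using an
+  evdns_base declared in <event2/dns.h>:
+
+      struct evdns_base *dns = evdns_base_new(base, 1);
+      bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
+          "www.example.com", 443);
+ */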
+
+/**
+ Return the error code for the last failed DNS lookup attempt made by
+ bufferevent_socket_connect_hostname().
+
+ @param bev The bufferevent object.
+ @return DNS error code.
+ @see evutil_gai_strerror()
+*/
+EVENT2_EXPORT_SYMBOL
+int bufferevent_socket_get_dns_error(struct bufferevent *bev);
+
+/**
+ Assign a bufferevent to a specific event_base.
+
+ NOTE that only socket bufferevents support this function.
+
+ @param base an event_base returned by event_init()
+ @param bufev a bufferevent struct returned by bufferevent_new()
+ or bufferevent_socket_new()
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_new()
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev);
+
+/**
+ Return the event_base used by a bufferevent
+*/
+EVENT2_EXPORT_SYMBOL
+struct event_base *bufferevent_get_base(struct bufferevent *bev);
+
+/**
+ Assign a priority to a bufferevent.
+
+ Only supported for socket bufferevents.
+
+ @param bufev a bufferevent struct
+ @param pri the priority to be assigned
+ @return 0 if successful, or -1 if an error occurred
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_priority_set(struct bufferevent *bufev, int pri);
+
+/**
+ Return the priority of a bufferevent.
+
+ Only supported for socket bufferevents
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_get_priority(const struct bufferevent *bufev);
+
+/**
+ Deallocate the storage associated with a bufferevent structure.
+
+ If there is pending data to write on the bufferevent, it probably won't be
+ flushed before the bufferevent is freed.
+
+ @param bufev the bufferevent structure to be freed.
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_free(struct bufferevent *bufev);
+
+
+/**
+ Changes the callbacks for a bufferevent.
+
+ @param bufev the bufferevent object for which to change callbacks
+ @param readcb callback to invoke when there is data to be read, or NULL if
+ no callback is desired
+ @param writecb callback to invoke when the file descriptor is ready for
+ writing, or NULL if no callback is desired
+ @param eventcb callback to invoke when there is an event on the file
+ descriptor
+ @param cbarg an argument that will be supplied to each of the callbacks
+ (readcb, writecb, and eventcb)
+ @see bufferevent_new()
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_setcb(struct bufferevent *bufev,
+ bufferevent_data_cb readcb, bufferevent_data_cb writecb,
+ bufferevent_event_cb eventcb, void *cbarg);
+
+/**
+ Retrieves the callbacks for a bufferevent.
+
+ @param bufev the bufferevent to examine.
+ @param readcb_ptr if readcb_ptr is nonnull, *readcb_ptr is set to the current
+ read callback for the bufferevent.
+ @param writecb_ptr if writecb_ptr is nonnull, *writecb_ptr is set to the
+ current write callback for the bufferevent.
+ @param eventcb_ptr if eventcb_ptr is nonnull, *eventcb_ptr is set to the
+ current event callback for the bufferevent.
+ @param cbarg_ptr if cbarg_ptr is nonnull, *cbarg_ptr is set to the current
+ callback argument for the bufferevent.
+ @see bufferevent_setcb()
+*/
+EVENT2_EXPORT_SYMBOL
+void bufferevent_getcb(struct bufferevent *bufev,
+ bufferevent_data_cb *readcb_ptr,
+ bufferevent_data_cb *writecb_ptr,
+ bufferevent_event_cb *eventcb_ptr,
+ void **cbarg_ptr);
+
+/**
+ Changes the file descriptor on which the bufferevent operates.
+ Not supported for all bufferevent types.
+
+ @param bufev the bufferevent object for which to change the file descriptor
+ @param fd the file descriptor to operate on
+*/
+EVENT2_EXPORT_SYMBOL
+int bufferevent_setfd(struct bufferevent *bufev, evutil_socket_t fd);
+
+/**
+ Returns the file descriptor associated with a bufferevent, or -1 if
+ no file descriptor is associated with the bufferevent.
+ */
+EVENT2_EXPORT_SYMBOL
+evutil_socket_t bufferevent_getfd(struct bufferevent *bufev);
+
+/**
+ Returns the underlying bufferevent associated with a bufferevent (if
+ the bufferevent is a wrapper), or NULL if there is no underlying bufferevent.
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *bufferevent_get_underlying(struct bufferevent *bufev);
+
+/**
+ Write data to a bufferevent buffer.
+
+ The bufferevent_write() function can be used to write data to the file
+ descriptor. The data is appended to the output buffer and written to the
+ descriptor automatically as it becomes available for writing.
+
+ @param bufev the bufferevent to be written to
+ @param data a pointer to the data to be written
+ @param size the length of the data, in bytes
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_write_buffer()
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_write(struct bufferevent *bufev,
+ const void *data, size_t size);
+
+
+/**
+ Write data from an evbuffer to a bufferevent buffer. The evbuffer is
+ drained as a result.
+
+ @param bufev the bufferevent to be written to
+ @param buf the evbuffer to be written
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_write()
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf);
+
+
+/**
+ Read data from a bufferevent buffer.
+
+ The bufferevent_read() function is used to read data from the input buffer.
+
+ @param bufev the bufferevent to be read from
+ @param data pointer to a buffer that will store the data
+ @param size the size of the data buffer, in bytes
+ @return the amount of data read, in bytes.
+ */
+EVENT2_EXPORT_SYMBOL
+size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
+
+/**
+ Read data from a bufferevent buffer into an evbuffer. This avoids
+ memory copies.
+
+ @param bufev the bufferevent to be read from
+ @param buf the evbuffer to which to add data
+ @return 0 if successful, or -1 if an error occurred.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf);
+
+/**
+ Returns the input buffer.
+
+ The user MUST NOT set the callback on this buffer.
+
+ @param bufev the bufferevent from which to get the evbuffer
+ @return the evbuffer object for the input buffer
+ */
+
+EVENT2_EXPORT_SYMBOL
+struct evbuffer *bufferevent_get_input(struct bufferevent *bufev);
+
+/**
+ Returns the output buffer.
+
+ The user MUST NOT set the callback on this buffer.
+
+ When filters are being used, the filters need to be manually
+ triggered if the output buffer was manipulated.
+
+ @param bufev the bufferevent from which to get the evbuffer
+ @return the evbuffer object for the output buffer
+ */
+
+EVENT2_EXPORT_SYMBOL
+struct evbuffer *bufferevent_get_output(struct bufferevent *bufev);
+
+/**
+ Enable a bufferevent.
+
+ @param bufev the bufferevent to be enabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_disable()
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_enable(struct bufferevent *bufev, short event);
+
+/**
+ Disable a bufferevent.
+
+ @param bufev the bufferevent to be disabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_enable()
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_disable(struct bufferevent *bufev, short event);
+
+/**
+ Return the events that are enabled on a given bufferevent.
+
+ @param bufev the bufferevent to inspect
+ @return A combination of EV_READ | EV_WRITE
+ */
+EVENT2_EXPORT_SYMBOL
+short bufferevent_get_enabled(struct bufferevent *bufev);
+
+/**
+ Set the read and write timeout for a bufferevent.
+
+ A bufferevent's timeout will fire the first time that the indicated
+ amount of time has elapsed since a successful read or write operation,
+ during which the bufferevent was trying to read or write.
+
+ (In other words, if reading or writing is disabled, or if the
+ bufferevent's read or write operation has been suspended because
+ there's no data to write, or not enough bandwidth, or so on, the
+ timeout isn't active. The timeout only becomes active when we are
+ willing to actually read or write.)
+
+ Calling bufferevent_enable or setting a timeout for a bufferevent
+ whose timeout is already pending resets its timeout.
+
+ If the timeout elapses, the corresponding operation (EV_READ or
+ EV_WRITE) becomes disabled until you re-enable it. The
+ bufferevent's event callback is called with either
+ BEV_EVENT_TIMEOUT|BEV_EVENT_READING or
+ BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING set.
+
+ @param bufev the bufferevent to be modified
+ @param timeout_read the read timeout, or NULL
+ @param timeout_write the write timeout, or NULL
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_set_timeouts(struct bufferevent *bufev,
+ const struct timeval *timeout_read, const struct timeval *timeout_write);
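+
+/*
+  Example sketch (not part of the upstream header): a 30-second read
+  timeout and no write timeout for an existing bufferevent "bev".
+
+      struct timeval read_to = { 30, 0 };
+      bufferevent_set_timeouts(bev, &read_to, NULL);
+ */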
+
+/**
+ Sets the watermarks for read and write events.
+
+ On input, a bufferevent does not invoke the user read callback unless
+ there is at least the low-watermark amount of data in the buffer. If the read buffer
+ is beyond the high watermark, the bufferevent stops reading from the network.
+
+ On output, the user write callback is invoked whenever the buffered data
+ falls below the low watermark. Filters that write to this bufev will try
+ not to write more bytes to this buffer than the high watermark would allow,
+ except when flushing.
+
+ @param bufev the bufferevent to be modified
+ @param events EV_READ, EV_WRITE or both
+ @param lowmark the low watermark to set
+ @param highmark the high watermark to set
+*/
+
+EVENT2_EXPORT_SYMBOL
+void bufferevent_setwatermark(struct bufferevent *bufev, short events,
+ size_t lowmark, size_t highmark);
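+
+/*
+  Example sketch (not from the upstream header): delay the read callback
+  until at least a hypothetical 16-byte record header is available, and
+  stop reading from the network once 64 KiB have been buffered.
+
+      bufferevent_setwatermark(bev, EV_READ, 16, 64 * 1024);
+ */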
+
+/**
+ Retrieves the watermarks for read or write events.
+ Returns non-zero if 'events' contains anything other than exactly EV_READ
+ or exactly EV_WRITE; returns zero if 'events' equals EV_READ or EV_WRITE.
+
+ @param bufev the bufferevent to be examined
+ @param events EV_READ or EV_WRITE
+ @param lowmark receives the low watermark if not NULL
+ @param highmark receives the high watermark if not NULL
+*/
+EVENT2_EXPORT_SYMBOL
+int bufferevent_getwatermark(struct bufferevent *bufev, short events,
+ size_t *lowmark, size_t *highmark);
+
+/**
+ Acquire the lock on a bufferevent. Has no effect if locking was not
+ enabled with BEV_OPT_THREADSAFE.
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_lock(struct bufferevent *bufev);
+
+/**
+ Release the lock on a bufferevent. Has no effect if locking was not
+ enabled with BEV_OPT_THREADSAFE.
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_unlock(struct bufferevent *bufev);
+
+
+/**
+ * Public interface to manually increase the reference count of a bufferevent.
+ * This is useful in situations where a user may reference the bufferevent
+ * somewhere else (unknown to libevent).
+ *
+ * @param bufev the bufferevent to increase the refcount on
+ *
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_incref(struct bufferevent *bufev);
+
+/**
+ * Public interface to manually decrement the reference count of a bufferevent
+ *
+ * Warning: make sure you know what you're doing. This is mainly used in
+ * conjunction with bufferevent_incref(). This will free up all data associated
+ * with a bufferevent if the reference count hits 0.
+ *
+ * @param bufev the bufferevent to decrement the refcount on
+ *
+ * @return 1 if the bufferevent was freed, otherwise 0 (still referenced)
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_decref(struct bufferevent *bufev);
+
+/**
+ Flags that can be passed into filters to let them know how to
+ deal with the incoming data.
+*/
+enum bufferevent_flush_mode {
+ /** usually set when processing data */
+ BEV_NORMAL = 0,
+
+ /** want to checkpoint all data sent. */
+ BEV_FLUSH = 1,
+
+ /** encountered EOF on read or done sending data */
+ BEV_FINISHED = 2
+};
+
+/**
+ Triggers the bufferevent to produce more data if possible.
+
+ @param bufev the bufferevent object
+ @param iotype either EV_READ or EV_WRITE or both.
+ @param mode either BEV_NORMAL or BEV_FLUSH or BEV_FINISHED
+ @return -1 on failure, 0 if no data was produced, 1 if data was produced
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_flush(struct bufferevent *bufev,
+ short iotype,
+ enum bufferevent_flush_mode mode);
+
+/**
+ Flags for bufferevent_trigger(_event) that modify when and how to trigger
+ the callback.
+*/
+enum bufferevent_trigger_options {
+ /** trigger the callback regardless of the watermarks */
+ BEV_TRIG_IGNORE_WATERMARKS = (1<<16),
+
+ /** defer the callbacks even if BEV_OPT_DEFER_CALLBACKS is not set */
+ BEV_TRIG_DEFER_CALLBACKS = BEV_OPT_DEFER_CALLBACKS
+
+ /* (Note: for internal reasons, these need to be disjoint from
+ * bufferevent_options, except when they mean the same thing.) */
+};
+
+/**
+ Triggers bufferevent data callbacks.
+
+ The function will honor watermarks unless options contain
+ BEV_TRIG_IGNORE_WATERMARKS. If the options contain BEV_OPT_DEFER_CALLBACKS,
+ the callbacks are deferred.
+
+ @param bufev the bufferevent object
+ @param iotype either EV_READ or EV_WRITE or both.
+ @param options zero or more flags from enum bufferevent_trigger_options
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_trigger(struct bufferevent *bufev, short iotype,
+ int options);
+
+/**
+ Triggers the bufferevent event callback.
+
+ If the options contain BEV_OPT_DEFER_CALLBACKS, the callbacks are deferred.
+
+ @param bufev the bufferevent object
+ @param what the flags to pass onto the event callback
+ @param options zero or more flags from enum bufferevent_trigger_options
+ */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_trigger_event(struct bufferevent *bufev, short what,
+ int options);
+
+/**
+ @name Filtering support
+
+ @{
+*/
+/**
+ Values that filters can return.
+ */
+enum bufferevent_filter_result {
+ /** everything is okay */
+ BEV_OK = 0,
+
+ /** the filter needs to read more data before output */
+ BEV_NEED_MORE = 1,
+
+ /** the filter encountered a critical error, no further data
+ can be processed. */
+ BEV_ERROR = 2
+};
+
+/** A callback function to implement a filter for a bufferevent.
+
+ @param src An evbuffer to drain data from.
+ @param dst An evbuffer to add data to.
+ @param limit A suggested upper bound of bytes to write to dst.
+ The filter may ignore this value, but doing so means that
+ it will overflow the high-water mark associated with dst.
+ -1 means "no limit".
+ @param mode Whether we should write data as may be convenient
+ (BEV_NORMAL), or flush as much data as we can (BEV_FLUSH),
+ or flush as much as we can, possibly including an end-of-stream
+ marker (BEV_FINISHED).
+ @param ctx A user-supplied pointer.
+
+ @return BEV_OK if we wrote some data; BEV_NEED_MORE if we can't
+ produce any more output until we get some input; and BEV_ERROR
+ on an error.
+ */
+typedef enum bufferevent_filter_result (*bufferevent_filter_cb)(
+ struct evbuffer *src, struct evbuffer *dst, ev_ssize_t dst_limit,
+ enum bufferevent_flush_mode mode, void *ctx);
+
+/**
+ Allocate a new filtering bufferevent on top of an existing bufferevent.
+
+ @param underlying the underlying bufferevent.
+ @param input_filter The filter to apply to data we read from the underlying
+ bufferevent
+ @param output_filter The filter to apply to data we write to the underlying
+ bufferevent
+ @param options A bitfield of bufferevent options.
+ @param free_context A function to use to free the filter context when
+ this bufferevent is freed.
+ @param ctx A context pointer to pass to the filter functions.
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *
+bufferevent_filter_new(struct bufferevent *underlying,
+ bufferevent_filter_cb input_filter,
+ bufferevent_filter_cb output_filter,
+ int options,
+ void (*free_context)(void *),
+ void *ctx);
+/**@}*/
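+
+/*
+  Minimal pass-through filter sketch (illustrative only, not from the
+  upstream header): it ignores the limit hint and simply moves every
+  pending byte from src to dst with evbuffer_add_buffer().
+
+      static enum bufferevent_filter_result
+      passthrough_filter(struct evbuffer *src, struct evbuffer *dst,
+          ev_ssize_t limit, enum bufferevent_flush_mode mode, void *ctx)
+      {
+          if (evbuffer_add_buffer(dst, src) < 0)
+              return BEV_ERROR;
+          return BEV_OK;
+      }
+
+      struct bufferevent *filtered = bufferevent_filter_new(underlying,
+          passthrough_filter, passthrough_filter,
+          BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+ */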
+
+/**
+ Allocate a pair of linked bufferevents. The bufferevents behave as would
+ two bufferevent_sock instances connected to opposite ends of a
+ socketpair(), except that no internal socketpair is allocated.
+
+ @param base The event base to associate with the socketpair.
+ @param options A set of options for this bufferevent
+ @param pair A pointer to an array to hold the two new bufferevent objects.
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_pair_new(struct event_base *base, int options,
+ struct bufferevent *pair[2]);
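+
+/*
+  Example sketch (not from the upstream header): create a connected pair
+  and write into one end; the bytes become readable on the other end.
+
+      struct bufferevent *pair[2];
+      if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) == 0) {
+          bufferevent_enable(pair[0], EV_WRITE);
+          bufferevent_enable(pair[1], EV_READ);
+          bufferevent_write(pair[0], "ping", 4);
+      }
+ */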
+
+/**
+ Given one bufferevent returned by bufferevent_pair_new(), returns the
+ other one if it still exists. Otherwise returns NULL.
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *bufferevent_pair_get_partner(struct bufferevent *bev);
+
+/**
+ Abstract type used to configure rate-limiting on a bufferevent or a group
+ of bufferevents.
+ */
+struct ev_token_bucket_cfg;
+
+/**
+ A group of bufferevents which are configured to respect the same rate
+ limit.
+*/
+struct bufferevent_rate_limit_group;
+
+/** Maximum configurable rate- or burst-limit. */
+#define EV_RATE_LIMIT_MAX EV_SSIZE_MAX
+
+/**
+ Initialize and return a new object to configure the rate-limiting behavior
+ of bufferevents.
+
+ @param read_rate The maximum number of bytes to read per tick on
+ average.
+ @param read_burst The maximum number of bytes to read in any single tick.
+ @param write_rate The maximum number of bytes to write per tick on
+ average.
+ @param write_burst The maximum number of bytes to write in any single tick.
+ @param tick_len The length of a single tick. Defaults to one second.
+ Any fractions of a millisecond are ignored.
+
+ Note that all rate-limits here are currently best-effort: future versions
+ of Libevent may implement them more tightly.
+ */
+EVENT2_EXPORT_SYMBOL
+struct ev_token_bucket_cfg *ev_token_bucket_cfg_new(
+ size_t read_rate, size_t read_burst,
+ size_t write_rate, size_t write_burst,
+ const struct timeval *tick_len);
+
+/** Free all storage held in 'cfg'.
+
+ Note: 'cfg' is not currently reference-counted; it is not safe to free it
+ until no bufferevent is using it.
+ */
+EVENT2_EXPORT_SYMBOL
+void ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg);
+
+/**
+ Set the rate-limit of the bufferevent 'bev' to the one specified in
+ 'cfg'. If 'cfg' is NULL, disable any per-bufferevent rate-limiting on
+ 'bev'.
+
+ Note that only some bufferevent types currently respect rate-limiting.
+ They are: socket-based bufferevents (normal and IOCP-based), and SSL-based
+ bufferevents.
+
+ Return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_set_rate_limit(struct bufferevent *bev,
+ struct ev_token_bucket_cfg *cfg);
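+
+/*
+  Example sketch (not from the upstream header): cap an existing
+  bufferevent "bev" at roughly 256 KiB per second in each direction,
+  using the default one-second tick.
+
+      struct ev_token_bucket_cfg *cfg = ev_token_bucket_cfg_new(
+          256 * 1024, 256 * 1024, 256 * 1024, 256 * 1024, NULL);
+      if (cfg)
+          bufferevent_set_rate_limit(bev, cfg);
+ */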
+
+/**
+ Create a new rate-limit group for bufferevents. A rate-limit group
+ constrains the maximum number of bytes sent and received, in toto,
+ by all of its bufferevents.
+
+ @param base An event_base to run any necessary timeouts for the group.
+ Note that the bufferevents in the group do not all need to share
+ this event_base.
+ @param cfg The rate-limit for this group.
+
+ Note that all rate-limits here are currently best-effort: future versions
+ of Libevent may implement them more tightly.
+
+ Note also that only some bufferevent types currently respect rate-limiting.
+ They are: socket-based bufferevents (normal and IOCP-based), and SSL-based
+ bufferevents.
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent_rate_limit_group *bufferevent_rate_limit_group_new(
+ struct event_base *base,
+ const struct ev_token_bucket_cfg *cfg);
+/**
+ Change the rate-limiting settings for a given rate-limiting group.
+
+ Return 0 on success, -1 on failure.
+*/
+EVENT2_EXPORT_SYMBOL
+int bufferevent_rate_limit_group_set_cfg(
+ struct bufferevent_rate_limit_group *,
+ const struct ev_token_bucket_cfg *);
+
+/**
+ Change the smallest quantum we're willing to allocate to any single
+ bufferevent in a group for reading or writing at a time.
+
+ The rationale is that, because of TCP/IP protocol overheads and kernel
+ behavior, if a rate-limiting group is so tight on bandwidth that you're
+ only willing to send 1 byte per tick per bufferevent, you might instead
+ want to batch up the reads and writes so that you send N bytes per
+ 1/N of the bufferevents (chosen at random) each tick, so you still wind
+ up sending 1 byte per tick per bufferevent on average, but you don't send
+ so many tiny packets.
+
+ The default min-share is currently 64 bytes.
+
+ Returns 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_rate_limit_group_set_min_share(
+ struct bufferevent_rate_limit_group *, size_t);
+
+/**
+ Free a rate-limiting group. The group must have no members when
+ this function is called.
+*/
+EVENT2_EXPORT_SYMBOL
+void bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *);
+
+/**
+ Add 'bev' to the list of bufferevents whose aggregate reading and writing
+ is restricted by 'g'. If 'g' is NULL, remove 'bev' from its current group.
+
+ A bufferevent may belong to no more than one rate-limit group at a time.
+ If 'bev' is already a member of a group, it will be removed from its old
+ group before being added to 'g'.
+
+ Return 0 on success and -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
+ struct bufferevent_rate_limit_group *g);
+
+/** Remove 'bev' from its current rate-limit group (if any). */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_remove_from_rate_limit_group(struct bufferevent *bev);
+
+/**
+ Set the size limit for a single read operation.
+
+ Set to 0 for a reasonable default.
+
+ Return 0 on success and -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_set_max_single_read(struct bufferevent *bev, size_t size);
+
+/**
+ Set the size limit for a single write operation.
+
+ Set to 0 for a reasonable default.
+
+ Return 0 on success and -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_set_max_single_write(struct bufferevent *bev, size_t size);
+
+/** Get the current size limit for a single read operation. */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_max_single_read(struct bufferevent *bev);
+
+/** Get the current size limit for a single write operation. */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_max_single_write(struct bufferevent *bev);
+
+/**
+ @name Rate limit inspection
+
+ Return the current read or write bucket size for a bufferevent.
+ If it is not configured with a per-bufferevent ratelimit, return
+ EV_SSIZE_MAX. This function does not inspect the group limit, if any.
+ Note that it can return a negative value if the bufferevent has been
+ made to read or write more than its limit.
+
+ @{
+ */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_read_limit(struct bufferevent *bev);
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_write_limit(struct bufferevent *bev);
+/*@}*/
+
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_max_to_read(struct bufferevent *bev);
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_get_max_to_write(struct bufferevent *bev);
+
+EVENT2_EXPORT_SYMBOL
+const struct ev_token_bucket_cfg *bufferevent_get_token_bucket_cfg(const struct bufferevent * bev);
+
+/**
+ @name Group Rate limit inspection
+
+ Return the read or write bucket size for a bufferevent rate limit
+ group. Note that it can return a negative value if bufferevents in
+ the group have been made to read or write more than their limits.
+
+ @{
+ */
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_rate_limit_group_get_read_limit(
+ struct bufferevent_rate_limit_group *);
+EVENT2_EXPORT_SYMBOL
+ev_ssize_t bufferevent_rate_limit_group_get_write_limit(
+ struct bufferevent_rate_limit_group *);
+/*@}*/
+
+/**
+ @name Rate limit manipulation
+
+ Subtract a number of bytes from a bufferevent's read or write bucket.
+ The decrement value can be negative, if you want to manually refill
+ the bucket. If the change puts the bucket above or below zero, the
+ bufferevent will resume or suspend reading or writing as appropriate.
+ These functions make no change in the buckets for the bufferevent's
+ group, if any.
+
+ Returns 0 on success, -1 on internal error.
+
+ @{
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr);
+EVENT2_EXPORT_SYMBOL
+int bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr);
+/*@}*/
+
+/**
+ @name Group rate limit manipulation
+
+ Subtract a number of bytes from a bufferevent rate-limiting group's
+ read or write bucket. The decrement value can be negative, if you
+ want to manually refill the bucket. If the change puts the bucket
+ above or below zero, the bufferevents in the group will resume or
+ suspend reading or writing as appropriate.
+
+ Returns 0 on success, -1 on internal error.
+
+ @{
+ */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_rate_limit_group_decrement_read(
+ struct bufferevent_rate_limit_group *, ev_ssize_t);
+EVENT2_EXPORT_SYMBOL
+int bufferevent_rate_limit_group_decrement_write(
+ struct bufferevent_rate_limit_group *, ev_ssize_t);
+/*@}*/
+
+
+/**
+ * Inspect the total bytes read/written on a group.
+ *
+ * Set the variable pointed to by total_read_out to the total number of bytes
+ * ever read on grp, and the variable pointed to by total_written_out to the
+ * total number of bytes ever written on grp. */
+EVENT2_EXPORT_SYMBOL
+void bufferevent_rate_limit_group_get_totals(
+ struct bufferevent_rate_limit_group *grp,
+ ev_uint64_t *total_read_out, ev_uint64_t *total_written_out);
+
+/**
+ * Reset the total bytes read/written on a group.
+ *
+ * Reset the number of bytes read or written on grp as given by
+ * bufferevent_rate_limit_group_get_totals(). */
+EVENT2_EXPORT_SYMBOL
+void
+bufferevent_rate_limit_group_reset_totals(
+ struct bufferevent_rate_limit_group *grp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_BUFFEREVENT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/bufferevent_compat.h b/libs/libevent/include/event2/bufferevent_compat.h
new file mode 100644
index 0000000000..65482042f9
--- /dev/null
+++ b/libs/libevent/include/event2/bufferevent_compat.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_BUFFEREVENT_COMPAT_H_INCLUDED_
+#define EVENT2_BUFFEREVENT_COMPAT_H_INCLUDED_
+
+#define evbuffercb bufferevent_data_cb
+#define everrorcb bufferevent_event_cb
+
+/**
+ Create a new bufferevent for an fd.
+
+ This function is deprecated. Use bufferevent_socket_new and
+ bufferevent_set_callbacks instead.
+
+ Libevent provides an abstraction on top of the regular event callbacks.
+ This abstraction is called a buffered event. A buffered event provides
+ input and output buffers that get filled and drained automatically. The
+ user of a buffered event no longer deals directly with the I/O, but
+ instead is reading from input and writing to output buffers.
+
+ Once initialized, the bufferevent structure can be used repeatedly with
+ bufferevent_enable() and bufferevent_disable().
+
+ When reading is enabled, the bufferevent will try to read from the file descriptor
+ and call the read callback. The write callback is executed whenever the
+ output buffer is drained below the write low watermark, which is 0 by
+ default.
+
+ If multiple bases are in use, bufferevent_base_set() must be called before
+ enabling the bufferevent for the first time.
+
+ @deprecated This function is deprecated because it uses the current
+ event base, and as such can be error prone for multithreaded programs.
+ Use bufferevent_socket_new() instead.
+
+ @param fd the file descriptor from which data is read and written to.
+ This file descriptor is not allowed to be a pipe(2).
+ @param readcb callback to invoke when there is data to be read, or NULL if
+ no callback is desired
+ @param writecb callback to invoke when the file descriptor is ready for
+ writing, or NULL if no callback is desired
+ @param errorcb callback to invoke when there is an error on the file
+ descriptor
+ @param cbarg an argument that will be supplied to each of the callbacks
+ (readcb, writecb, and errorcb)
+ @return a pointer to a newly allocated bufferevent struct, or NULL if an
+ error occurred
+ @see bufferevent_base_set(), bufferevent_free()
+ */
+struct bufferevent *bufferevent_new(evutil_socket_t fd,
+ evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
+
+
+/**
+ Set the read and write timeout for a buffered event.
+
+ @param bufev the bufferevent to be modified
+ @param timeout_read the read timeout
+ @param timeout_write the write timeout
+ */
+void bufferevent_settimeout(struct bufferevent *bufev,
+ int timeout_read, int timeout_write);
+
+#define EVBUFFER_READ BEV_EVENT_READING
+#define EVBUFFER_WRITE BEV_EVENT_WRITING
+#define EVBUFFER_EOF BEV_EVENT_EOF
+#define EVBUFFER_ERROR BEV_EVENT_ERROR
+#define EVBUFFER_TIMEOUT BEV_EVENT_TIMEOUT
+
+/** macro for getting access to the input buffer of a bufferevent */
+#define EVBUFFER_INPUT(x) bufferevent_get_input(x)
+/** macro for getting access to the output buffer of a bufferevent */
+#define EVBUFFER_OUTPUT(x) bufferevent_get_output(x)
+
+#endif
diff --git a/libs/libevent/include/event2/bufferevent_ssl.h b/libs/libevent/include/event2/bufferevent_ssl.h
new file mode 100644
index 0000000000..bf39b844a9
--- /dev/null
+++ b/libs/libevent/include/event2/bufferevent_ssl.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_BUFFEREVENT_SSL_H_INCLUDED_
+#define EVENT2_BUFFEREVENT_SSL_H_INCLUDED_
+
+/** @file event2/bufferevent_ssl.h
+
+ OpenSSL support for bufferevents.
+ */
+#include <event2/visibility.h>
+#include <event2/event-config.h>
+#include <event2/bufferevent.h>
+#include <event2/util.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This is what openssl's SSL objects are underneath. */
+struct ssl_st;
+
+/**
+ The state of an SSL object to be used when creating a new
+ SSL bufferevent.
+ */
+enum bufferevent_ssl_state {
+ BUFFEREVENT_SSL_OPEN = 0,
+ BUFFEREVENT_SSL_CONNECTING = 1,
+ BUFFEREVENT_SSL_ACCEPTING = 2
+};
+
+#if defined(EVENT__HAVE_OPENSSL) || defined(EVENT_IN_DOXYGEN_)
+/**
+ Create a new SSL bufferevent to send its data over another bufferevent.
+
+ @param base An event_base to use to detect reading and writing. It
+ must also be the base for the underlying bufferevent.
+ @param underlying the underlying bufferevent that carries the encrypted data
+ @param ssl An SSL* object from openssl.
+ @param state The current state of the SSL connection
+ @param options One or more bufferevent_options
+ @return A new bufferevent on success, or NULL on failure
+*/
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *
+bufferevent_openssl_filter_new(struct event_base *base,
+ struct bufferevent *underlying,
+ struct ssl_st *ssl,
+ enum bufferevent_ssl_state state,
+ int options);
+
+/**
+ Create a new SSL bufferevent to send its data over an SSL * on a socket.
+
+ @param base An event_base to use to detect reading and writing
+ @param fd A socket to use for this SSL
+ @param ssl An SSL* object from openssl.
+ @param state The current state of the SSL connection
+ @param options One or more bufferevent_options
+ @return A new bufferevent on success, or NULL on failure.
+*/
+EVENT2_EXPORT_SYMBOL
+struct bufferevent *
+bufferevent_openssl_socket_new(struct event_base *base,
+ evutil_socket_t fd,
+ struct ssl_st *ssl,
+ enum bufferevent_ssl_state state,
+ int options);
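+
+/*
+  Hedged sketch of a TLS client bufferevent (not part of the upstream
+  header); it assumes OpenSSL 1.1 or newer and an existing
+  "struct event_base *base".
+
+      SSL_CTX *ssl_ctx = SSL_CTX_new(TLS_client_method());
+      SSL *ssl = SSL_new(ssl_ctx);
+      struct bufferevent *bev = bufferevent_openssl_socket_new(base, -1,
+          ssl, BUFFEREVENT_SSL_CONNECTING,
+          BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
+ */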
+
+/** Control how to report dirty SSL shutdowns.
+
+ If the peer (or the network, or an attacker) closes the TCP
+ connection before closing the SSL channel, and the protocol is SSL >= v3,
+ this is a "dirty" shutdown. If allow_dirty_shutdown is 0 (default),
+ this is reported as BEV_EVENT_ERROR.
+
+ If instead allow_dirty_shutdown=1, a dirty shutdown is reported as
+ BEV_EVENT_EOF.
+
+ (Note that if the protocol is < SSLv3, you will always receive
+ BEV_EVENT_EOF, since SSL 2 and earlier cannot distinguish a secure
+ connection close from a dirty one. This is one reason (among many)
+ not to use SSL 2.)
+*/
+
+EVENT2_EXPORT_SYMBOL
+int bufferevent_openssl_get_allow_dirty_shutdown(struct bufferevent *bev);
+EVENT2_EXPORT_SYMBOL
+void bufferevent_openssl_set_allow_dirty_shutdown(struct bufferevent *bev,
+ int allow_dirty_shutdown);
+
+/** Return the underlying openssl SSL * object for an SSL bufferevent. */
+EVENT2_EXPORT_SYMBOL
+struct ssl_st *
+bufferevent_openssl_get_ssl(struct bufferevent *bufev);
+
+/** Tells a bufferevent to begin SSL renegotiation. */
+EVENT2_EXPORT_SYMBOL
+int bufferevent_ssl_renegotiate(struct bufferevent *bev);
+
+/** Return the most recent OpenSSL error reported on an SSL bufferevent. */
+EVENT2_EXPORT_SYMBOL
+unsigned long bufferevent_get_openssl_error(struct bufferevent *bev);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_BUFFEREVENT_SSL_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/bufferevent_struct.h b/libs/libevent/include/event2/bufferevent_struct.h
new file mode 100644
index 0000000000..e84c082c30
--- /dev/null
+++ b/libs/libevent/include/event2/bufferevent_struct.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_
+#define EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_
+
+/** @file event2/bufferevent_struct.h
+
+ Data structures for bufferevents. Using these structures may hurt forward
+ compatibility with later versions of Libevent: be careful!
+
+ @deprecated Use of bufferevent_struct.h is completely deprecated; these
+ structures are only exposed for backward compatibility with programs
+ written before Libevent 2.0 that used them.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+/* For struct event */
+#include <event2/event_struct.h>
+
+struct event_watermark {
+ size_t low;
+ size_t high;
+};
+
+/**
+ Shared implementation of a bufferevent.
+
+ This type is exposed only because it was exposed in previous versions,
+ and some people's code may rely on manipulating it. Otherwise, you
+ should really not rely on the layout, size, or contents of this structure:
+ it is fairly volatile, and WILL change in future versions of the code.
+**/
+struct bufferevent {
+ /** Event base for which this bufferevent was created. */
+ struct event_base *ev_base;
+ /** Pointer to a table of function pointers to set up how this
+ bufferevent behaves. */
+ const struct bufferevent_ops *be_ops;
+
+ /** A read event that triggers when a timeout has happened or a socket
+ is ready to read data. Only used by some subtypes of
+ bufferevent. */
+ struct event ev_read;
+ /** A write event that triggers when a timeout has happened or a socket
+ is ready to write data. Only used by some subtypes of
+ bufferevent. */
+ struct event ev_write;
+
+ /** An input buffer. Only the bufferevent is allowed to add data to
+ this buffer, though the user is allowed to drain it. */
+ struct evbuffer *input;
+
+ /** An output buffer. Only the bufferevent is allowed to drain data
+ from this buffer, though the user is allowed to add to it. */
+ struct evbuffer *output;
+
+ struct event_watermark wm_read;
+ struct event_watermark wm_write;
+
+ bufferevent_data_cb readcb;
+ bufferevent_data_cb writecb;
+ /* This should be called 'eventcb', but renaming it would break
+ * backward compatibility */
+ bufferevent_event_cb errorcb;
+ void *cbarg;
+
+ struct timeval timeout_read;
+ struct timeval timeout_write;
+
+ /** Events that are currently enabled: currently EV_READ and EV_WRITE
+ are supported. */
+ short enabled;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_BUFFEREVENT_STRUCT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/dns.h b/libs/libevent/include/event2/dns.h
new file mode 100644
index 0000000000..17cd86a2ec
--- /dev/null
+++ b/libs/libevent/include/event2/dns.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The original DNS code is due to Adam Langley with heavy
+ * modifications by Nick Mathewson. Adam put his DNS software in the
+ * public domain. You can find his original copyright below. Please be
+ * aware that the code as part of Libevent is governed by the 3-clause
+ * BSD license above.
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ */
+
+/** @file event2/dns.h
+ *
+ * Welcome, gentle reader
+ *
+ * Async DNS lookups are really a whole lot harder than they should be,
+ * mostly stemming from the fact that the libc resolver has never been
+ * very good at them. Before you use this library you should see if libc
+ * can do the job for you with the modern async call getaddrinfo_a
+ * (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+ * please continue.
+ *
+ * The library keeps track of the state of nameservers and will avoid
+ * them when they go down. Otherwise it will round robin between them.
+ *
+ * Quick start guide:
+ * #include "evdns.h"
+ * void callback(int result, char type, int count, int ttl,
+ * void *addresses, void *arg);
+ * evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+ * evdns_resolve("www.hostname.com", 0, callback, NULL);
+ *
+ * When the lookup is complete the callback function is called. The
+ * first argument will be one of the DNS_ERR_* defines in evdns.h.
+ * Hopefully it will be DNS_ERR_NONE, in which case type will be
+ * DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+ * which the data can be cached for (in seconds), addresses will point
+ * to an array of uint32_t's and arg will be whatever you passed to
+ * evdns_resolve.
+ *
+ * Searching:
+ *
+ * In order for this library to be a good replacement for glibc's resolver it
+ * supports searching. This involves setting a list of default domains, in
+ * which names will be queried for. The number of dots in the query name
+ * determines the order in which this list is used.
+ *
+ * Searching appears to be a single lookup from the point of view of the API,
+ * although many DNS queries may be generated from a single call to
+ * evdns_resolve. Searching can also drastically slow down the resolution
+ * of names.
+ *
+ * To disable searching:
+ * 1. Never set it up. If you never call evdns_resolv_conf_parse or
+ * evdns_search_add then no searching will occur.
+ *
+ * 2. If you do call evdns_resolv_conf_parse then don't pass
+ * DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
+ *
+ * 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
+ *
+ * The order of searches depends on the number of dots in the name. If the
+ * number is greater than the ndots setting then the name is first tried
+ * globally. Otherwise each search domain is appended in turn.
+ *
+ * The ndots setting can either be set from a resolv.conf, or by calling
+ * evdns_search_ndots_set.
+ *
+ * For example, with ndots set to 1 (the default) and a search domain list of
+ * ["myhome.net"]:
+ * Query: www
+ * Order: www.myhome.net, www.
+ *
+ * Query: www.abc
+ * Order: www.abc., www.abc.myhome.net
+ *
+ * Internals:
+ *
+ * Requests are kept in two queues. The first is the inflight queue. In
+ * this queue requests have an allocated transaction id and nameserver.
+ * They will soon be transmitted if they haven't already been.
+ *
+ * The second is the waiting queue. The size of the inflight ring is
+ * limited and all other requests wait in waiting queue for space. This
+ * bounds the number of concurrent requests so that we don't flood the
+ * nameserver. Several algorithms require a full walk of the inflight
+ * queue and so bounding its size keeps things going nicely under huge
+ * (many thousands of requests) loads.
+ *
+ * If a nameserver loses too many requests it is considered down and we
+ * try not to use it. After a while we send a probe to that nameserver
+ * (a lookup for google.com) and, if it replies, we consider it working
+ * again. If the nameserver fails a probe we wait longer to try again
+ * with the next probe.
+ */
+
+#ifndef EVENT2_DNS_H_INCLUDED_
+#define EVENT2_DNS_H_INCLUDED_
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For integer types. */
+#include <event2/util.h>
+
+/** Error codes 0-5 are as described in RFC 1035. */
+#define DNS_ERR_NONE 0
+/** The name server was unable to interpret the query */
+#define DNS_ERR_FORMAT 1
+/** The name server was unable to process this query due to a problem with the
+ * name server */
+#define DNS_ERR_SERVERFAILED 2
+/** The domain name does not exist */
+#define DNS_ERR_NOTEXIST 3
+/** The name server does not support the requested kind of query */
+#define DNS_ERR_NOTIMPL 4
+/** The name server refuses to perform the specified operation for policy
+ * reasons */
+#define DNS_ERR_REFUSED 5
+/** The reply was truncated or ill-formatted */
+#define DNS_ERR_TRUNCATED 65
+/** An unknown error occurred */
+#define DNS_ERR_UNKNOWN 66
+/** Communication with the server timed out */
+#define DNS_ERR_TIMEOUT 67
+/** The request was canceled because the DNS subsystem was shut down. */
+#define DNS_ERR_SHUTDOWN 68
+/** The request was canceled via a call to evdns_cancel_request */
+#define DNS_ERR_CANCEL 69
+/** There were no answers and no error condition in the DNS packet.
+ * This can happen when you ask for an address that exists, but a record
+ * type that doesn't. */
+#define DNS_ERR_NODATA 70
+
+#define DNS_IPv4_A 1
+#define DNS_PTR 2
+#define DNS_IPv6_AAAA 3
+
+#define DNS_QUERY_NO_SEARCH 1
+
+#define DNS_OPTION_SEARCH 1
+#define DNS_OPTION_NAMESERVERS 2
+#define DNS_OPTION_MISC 4
+#define DNS_OPTION_HOSTSFILE 8
+#define DNS_OPTIONS_ALL 15
+
+/* Obsolete name for DNS_QUERY_NO_SEARCH */
+#define DNS_NO_SEARCH DNS_QUERY_NO_SEARCH
+
+/**
+ * The callback that contains the results from a lookup.
+ * - result is one of the DNS_ERR_* values (DNS_ERR_NONE for success)
+ * - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
+ * - count contains the number of addresses of form type
+ * - ttl is the number of seconds the resolution may be cached for.
+ * - addresses needs to be cast according to type. It will be an array of
+ * 4-byte sequences for ipv4, or an array of 16-byte sequences for ipv6,
+ * or a nul-terminated string for PTR.
+ */
+typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
+
+struct evdns_base;
+struct event_base;
+
+/** Flag for evdns_base_new: process resolv.conf. */
+#define EVDNS_BASE_INITIALIZE_NAMESERVERS 1
+/** Flag for evdns_base_new: Do not prevent the libevent event loop from
+ * exiting when we have no active dns requests. */
+#define EVDNS_BASE_DISABLE_WHEN_INACTIVE 0x8000
+
+/**
+ Initialize the asynchronous DNS library.
+
+ This function initializes support for non-blocking name resolution by
+ calling evdns_resolv_conf_parse() on UNIX and
+ evdns_config_windows_nameservers() on Windows.
+
+ @param event_base the event base to associate the dns client with
+ @param flags any of EVDNS_BASE_INITIALIZE_NAMESERVERS|
+ EVDNS_BASE_DISABLE_WHEN_INACTIVE
+ @return evdns_base object if successful, or NULL if an error occurred.
+ @see evdns_base_free()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_base * evdns_base_new(struct event_base *event_base, int initialize_nameservers);
+
+
+/**
+ Shut down the asynchronous DNS resolver and terminate all active requests.
+
+ If the 'fail_requests' option is enabled, all active requests will return
+ an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
+ the requests will be silently discarded.
+
+ @param evdns_base the evdns base to free
+ @param fail_requests if zero, active requests will be aborted; if non-zero,
+ active requests will return DNS_ERR_SHUTDOWN.
+ @see evdns_base_new()
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_base_free(struct evdns_base *base, int fail_requests);
+
+/**
+ Remove all hosts entries that have been loaded into the evdns_base via
+ evdns_base_load_hosts or via evdns_base_resolv_conf_parse.
+
+ @param evdns_base the evdns base to remove outdated host addresses from
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_base_clear_host_addresses(struct evdns_base *base);
+
+/**
+ Convert a DNS error code to a string.
+
+ @param err the DNS error code
+ @return a string containing an explanation of the error code
+*/
+EVENT2_EXPORT_SYMBOL
+const char *evdns_err_to_string(int err);
+
+
+/**
+ Add a nameserver.
+
+ The address should be an IPv4 address in network byte order.
+ The type of address is chosen so that it matches in_addr.s_addr.
+
+ @param base the evdns_base to which to add the name server
+ @param address an IP address in network byte order
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_base_nameserver_ip_add()
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_nameserver_add(struct evdns_base *base,
+ unsigned long int address);
+
+/**
+ Get the number of configured nameservers.
+
+ This returns the number of configured nameservers (not necessarily the
+ number of running nameservers). This is useful for double-checking
+ whether our calls to the various nameserver configuration functions
+ have been successful.
+
+ @param base the evdns_base to which to apply this operation
+ @return the number of configured nameservers
+ @see evdns_base_nameserver_add()
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_count_nameservers(struct evdns_base *base);
+
+/**
+ Remove all configured nameservers, and suspend all pending resolves.
+
+ Resolves will not necessarily be re-attempted until evdns_base_resume() is called.
+
+ @param base the evdns_base to which to apply this operation
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_base_resume()
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_clear_nameservers_and_suspend(struct evdns_base *base);
+
+
+/**
+ Resume normal operation and continue any suspended resolve requests.
+
+ Re-attempt resolves left in limbo after an earlier call to
+ evdns_base_clear_nameservers_and_suspend().
+
+ @param base the evdns_base to which to apply this operation
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_base_clear_nameservers_and_suspend()
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_resume(struct evdns_base *base);
+
+/**
+ Add a nameserver by string address.
+
+  This function parses an IPv4 or IPv6 address from a string and adds it as a
+ nameserver. It supports the following formats:
+ - [IPv6Address]:port
+ - [IPv6Address]
+ - IPv6Address
+ - IPv4Address:port
+ - IPv4Address
+
+ If no port is specified, it defaults to 53.
+
+ @param base the evdns_base to which to apply this operation
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_base_nameserver_add()
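+
+  A short configuration sketch, assuming 'base' is an evdns_base created
+  with evdns_base_new() (the addresses below are placeholders from the
+  documentation ranges, not real resolvers):
+
+  @code
+  evdns_base_nameserver_ip_add(base, "192.0.2.1");
+  evdns_base_nameserver_ip_add(base, "[2001:db8::1]:53");
+  @endcode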
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_nameserver_ip_add(struct evdns_base *base,
+ const char *ip_as_string);
+
+/**
+ Add a nameserver by sockaddr.
+ **/
+EVENT2_EXPORT_SYMBOL
+int
+evdns_base_nameserver_sockaddr_add(struct evdns_base *base,
+ const struct sockaddr *sa, ev_socklen_t len, unsigned flags);
+
+struct evdns_request;
+
+/**
+ Lookup an A record for a given name.
+
+ @param base the evdns_base to which to apply this operation
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return an evdns_request object if successful, or NULL if an error occurred.
+  @see evdns_base_resolve_ipv6(), evdns_base_resolve_reverse(), evdns_base_resolve_reverse_ipv6(), evdns_cancel_request()
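+
+  A condensed client sketch, assuming <event2/dns.h> and <event2/event.h>
+  are included and that lookup_cb is a function matching evdns_callback_type
+  (the helper name resolve_example is illustrative; error handling omitted):
+
+  @code
+  void resolve_example(void)
+  {
+      struct event_base *base = event_base_new();
+      struct evdns_base *dns =
+          evdns_base_new(base, EVDNS_BASE_INITIALIZE_NAMESERVERS);
+      evdns_base_resolve_ipv4(dns, "www.example.com", 0, lookup_cb, NULL);
+      event_base_dispatch(base);
+      evdns_base_free(dns, 0);
+      event_base_free(base);
+  }
+  @endcode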
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_request *evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+/**
+ Lookup an AAAA record for a given name.
+
+ @param base the evdns_base to which to apply this operation
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return an evdns_request object if successful, or NULL if an error occurred.
+  @see evdns_base_resolve_ipv4(), evdns_base_resolve_reverse(), evdns_base_resolve_reverse_ipv6(), evdns_cancel_request()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_request *evdns_base_resolve_ipv6(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+struct in_addr;
+struct in6_addr;
+
+/**
+ Lookup a PTR record for a given IP address.
+
+ @param base the evdns_base to which to apply this operation
+ @param in an IPv4 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return an evdns_request object if successful, or NULL if an error occurred.
+  @see evdns_base_resolve_reverse_ipv6(), evdns_cancel_request()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_request *evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+ Lookup a PTR record for a given IPv6 address.
+
+ @param base the evdns_base to which to apply this operation
+ @param in an IPv6 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return an evdns_request object if successful, or NULL if an error occurred.
+  @see evdns_base_resolve_reverse(), evdns_cancel_request()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_request *evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+/**
+ Cancels a pending DNS resolution request.
+
+ @param base the evdns_base that was used to make the request
+ @param req the evdns_request that was returned by calling a resolve function
+  @see evdns_base_resolve_ipv4(), evdns_base_resolve_ipv6(), evdns_base_resolve_reverse()
+*/
+EVENT2_EXPORT_SYMBOL
+void evdns_cancel_request(struct evdns_base *base, struct evdns_request *req);
+
+/**
+ Set the value of a configuration option.
+
+ The currently available configuration options are:
+
+ ndots, timeout, max-timeouts, max-inflight, attempts, randomize-case,
+ bind-to, initial-probe-timeout, getaddrinfo-allow-skew.
+
+ In versions before Libevent 2.0.3-alpha, the option name needed to end with
+ a colon.
+
+ @param base the evdns_base to which to apply this operation
+ @param option the name of the configuration option to be modified
+ @param val the value to be set
+ @return 0 if successful, or -1 if an error occurred
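+
+  For example, to allow three attempts with a five second timeout each
+  (a sketch, assuming 'base' is a valid evdns_base):
+
+  @code
+  evdns_base_set_option(base, "timeout", "5");
+  evdns_base_set_option(base, "attempts", "3");
+  @endcode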
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_set_option(struct evdns_base *base, const char *option, const char *val);
+
+
+/**
+ Parse a resolv.conf file.
+
+ The 'flags' parameter determines what information is parsed from the
+ resolv.conf file. See the man page for resolv.conf for the format of this
+ file.
+
+ The following directives are not parsed from the file: sortlist, rotate,
+ no-check-names, inet6, debug.
+
+ If this function encounters an error, the possible return values are: 1 =
+ failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
+ memory, 5 = short read from file, 6 = no nameservers listed in the file
+
+ @param base the evdns_base to which to apply this operation
+ @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
+ DNS_OPTION_HOSTSFILE|DNS_OPTIONS_ALL
+ @param filename the path to the resolv.conf file
+ @return 0 if successful, or various positive error codes if an error
+ occurred (see above)
+ @see resolv.conf(3), evdns_config_windows_nameservers()
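+
+  A typical call, as a sketch ('base' is an existing evdns_base, <stdio.h>
+  is included, and the path shown is the conventional UNIX location; the
+  return value uses the error codes listed above):
+
+  @code
+  int err = evdns_base_resolv_conf_parse(base,
+      DNS_OPTION_NAMESERVERS | DNS_OPTION_SEARCH, "/etc/resolv.conf");
+  if (err)
+      printf("resolv.conf could not be parsed: error %d\n", err);
+  @endcode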
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename);
+
+/**
+ Load an /etc/hosts-style file from 'hosts_fname' into 'base'.
+
+ If hosts_fname is NULL, add minimal entries for localhost, and nothing
+ else.
+
+ Note that only evdns_getaddrinfo uses the /etc/hosts entries.
+
+ This function does not replace previously loaded hosts entries; to do that,
+ call evdns_base_clear_host_addresses first.
+
+ Return 0 on success, negative on failure.
+*/
+EVENT2_EXPORT_SYMBOL
+int evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname);
+
+/**
+ Obtain nameserver information using the Windows API.
+
+ Attempt to configure a set of nameservers based on platform settings on
+ a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
+ looks in the registry.
+
+ @return 0 if successful, or -1 if an error occurred
+  @see evdns_base_resolv_conf_parse()
+ */
+#ifdef _WIN32
+EVENT2_EXPORT_SYMBOL
+int evdns_base_config_windows_nameservers(struct evdns_base *);
+#define EVDNS_BASE_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED
+#endif
+
+
+/**
+ Clear the list of search domains.
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_base_search_clear(struct evdns_base *base);
+
+
+/**
+ Add a domain to the list of search domains
+
+ @param domain the domain to be added to the search list
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_base_search_add(struct evdns_base *base, const char *domain);
+
+
+/**
+ Set the 'ndots' parameter for searches.
+
+ Sets the number of dots which, when found in a name, causes
+ the first query to be without any search domain.
+
+ @param ndots the new ndots parameter
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_base_search_ndots_set(struct evdns_base *base, const int ndots);
+
+/**
+ A callback that is invoked when a log message is generated
+
+ @param is_warning indicates if the log message is a 'warning'
+ @param msg the content of the log message
+ */
+typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
+
+
+/**
+ Set the callback function to handle DNS log messages. If this
+ callback is not set, evdns log messages are handled with the regular
+ Libevent logging system.
+
+ @param fn the callback to be invoked when a log message is generated
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_set_log_fn(evdns_debug_log_fn_type fn);
+
+/**
+ Set a callback that will be invoked to generate transaction IDs. By
+ default, we pick transaction IDs based on the current clock time, which
+ is bad for security.
+
+ @param fn the new callback, or NULL to use the default.
+
+ NOTE: This function has no effect in Libevent 2.0.4-alpha and later,
+ since Libevent now provides its own secure RNG.
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
+
+/**
+ Set a callback used to generate random bytes. By default, we use
+ the same function as passed to evdns_set_transaction_id_fn to generate
+ bytes two at a time. If a function is provided here, it's also used
+ to generate transaction IDs.
+
+ NOTE: This function has no effect in Libevent 2.0.4-alpha and later,
+ since Libevent now provides its own secure RNG.
+*/
+EVENT2_EXPORT_SYMBOL
+void evdns_set_random_bytes_fn(void (*fn)(char *, size_t));
+
+/*
+ * Functions used to implement a DNS server.
+ */
+
+struct evdns_server_request;
+struct evdns_server_question;
+
+/**
+ A callback to implement a DNS server. The callback function receives a DNS
+ request. It should then optionally add a number of answers to the reply
+ using the evdns_server_request_add_*_reply functions, before calling either
+ evdns_server_request_respond to send the reply back, or
+ evdns_server_request_drop to decline to answer the request.
+
+ @param req A newly received request
+ @param user_data A pointer that was passed to
+ evdns_add_server_port_with_base().
+ */
+typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
+#define EVDNS_ANSWER_SECTION 0
+#define EVDNS_AUTHORITY_SECTION 1
+#define EVDNS_ADDITIONAL_SECTION 2
+
+#define EVDNS_TYPE_A 1
+#define EVDNS_TYPE_NS 2
+#define EVDNS_TYPE_CNAME 5
+#define EVDNS_TYPE_SOA 6
+#define EVDNS_TYPE_PTR 12
+#define EVDNS_TYPE_MX 15
+#define EVDNS_TYPE_TXT 16
+#define EVDNS_TYPE_AAAA 28
+
+#define EVDNS_QTYPE_AXFR 252
+#define EVDNS_QTYPE_ALL 255
+
+#define EVDNS_CLASS_INET 1
+
+/* flags that can be set in answers; as part of the err parameter */
+#define EVDNS_FLAGS_AA 0x400
+#define EVDNS_FLAGS_RD 0x080
+
+/** Create a new DNS server port.
+
+ @param base The event base to handle events for the server port.
+ @param socket A UDP socket to accept DNS requests.
+ @param flags Always 0 for now.
+ @param callback A function to invoke whenever we get a DNS request
+ on the socket.
+ @param user_data Data to pass to the callback.
+ @return an evdns_server_port structure for this server port.
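+
+   A server-side sketch with illustrative helper names, assuming
+   <event2/dns_struct.h> and the platform socket headers are included, and
+   that 'sock' is a bound, non-blocking UDP socket; the 192.0.2.1 answer is
+   a documentation placeholder:
+
+   @code
+   static void
+   server_cb(struct evdns_server_request *req, void *user_data)
+   {
+       struct in_addr ans;
+       int i;
+       (void)user_data;
+       evutil_inet_pton(AF_INET, "192.0.2.1", &ans);
+       for (i = 0; i < req->nquestions; ++i) {
+           const struct evdns_server_question *q = req->questions[i];
+           if (q->type == EVDNS_TYPE_A && q->dns_question_class == EVDNS_CLASS_INET)
+               evdns_server_request_add_a_reply(req, q->name, 1, &ans, 300);
+       }
+       evdns_server_request_respond(req, 0);
+   }
+
+   static struct evdns_server_port *
+   start_server(struct event_base *base, evutil_socket_t sock)
+   {
+       return evdns_add_server_port_with_base(base, sock, 0, server_cb, NULL);
+   }
+   @endcode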
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_server_port *evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type callback, void *user_data);
+/** Close down a DNS server port, and free associated structures. */
+EVENT2_EXPORT_SYMBOL
+void evdns_close_server_port(struct evdns_server_port *port);
+
+/** Sets some flags in a reply we're building.
+ Allows setting of the AA or RD flags
+ */
+EVENT2_EXPORT_SYMBOL
+void evdns_server_request_set_flags(struct evdns_server_request *req, int flags);
+
+/* Functions to add an answer to an in-progress DNS reply.
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl);
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl);
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
+
+/**
+ Send back a response to a DNS request, and free the request structure.
+*/
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_respond(struct evdns_server_request *req, int err);
+/**
+ Free a DNS request without sending back a reply.
+*/
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_drop(struct evdns_server_request *req);
+struct sockaddr;
+/**
+  Get the address of the client that made a DNS request.
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_server_request_get_requesting_addr(struct evdns_server_request *req, struct sockaddr *sa, int addr_len);
+
+/** Callback for evdns_getaddrinfo. */
+typedef void (*evdns_getaddrinfo_cb)(int result, struct evutil_addrinfo *res, void *arg);
+
+struct evdns_base;
+struct evdns_getaddrinfo_request;
+/** Make a non-blocking getaddrinfo request using the evdns_base 'dns_base'.
+ *
+ * If we can answer the request immediately (with an error or not!), then we
+ * invoke cb immediately and return NULL. Otherwise we return
+ * an evdns_getaddrinfo_request and invoke cb later.
+ *
+ * When the callback is invoked, we pass as its first argument the error code
+ * that getaddrinfo would return (or 0 for no error). As its second argument,
+ * we pass the evutil_addrinfo structures we found (or NULL on error). We
+ * pass 'arg' as the third argument.
+ *
+ * Limitations:
+ *
+ * - The AI_V4MAPPED and AI_ALL flags are not currently implemented.
+ * - For ai_socktype, we only handle SOCK_STREAM, SOCK_DGRAM, and 0.
+ * - For ai_protocol, we only handle IPPROTO_TCP, IPPROTO_UDP, and 0.
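+ *
+ * A usage sketch (illustrative only; assumes <event2/util.h>, <string.h>,
+ * <stdio.h> and the platform socket headers are included, and that
+ * 'dns_base' is an existing evdns_base; the helper names are not part of
+ * the API):
+ *
+ * @code
+ * static void
+ * gai_cb(int err, struct evutil_addrinfo *res, void *arg)
+ * {
+ *     struct evutil_addrinfo *ai;
+ *     (void)arg;
+ *     if (err) {
+ *         printf("lookup failed: %s\n", evutil_gai_strerror(err));
+ *         return;
+ *     }
+ *     for (ai = res; ai; ai = ai->ai_next)
+ *         printf("family %d, socktype %d\n", ai->ai_family, ai->ai_socktype);
+ *     evutil_freeaddrinfo(res);
+ * }
+ *
+ * static void
+ * start_lookup(struct evdns_base *dns_base)
+ * {
+ *     struct evutil_addrinfo hints;
+ *     memset(&hints, 0, sizeof(hints));
+ *     hints.ai_family = AF_UNSPEC;
+ *     hints.ai_socktype = SOCK_STREAM;
+ *     evdns_getaddrinfo(dns_base, "www.example.com", "443", &hints, gai_cb, NULL);
+ * }
+ * @endcode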
+ */
+EVENT2_EXPORT_SYMBOL
+struct evdns_getaddrinfo_request *evdns_getaddrinfo(
+ struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ evdns_getaddrinfo_cb cb, void *arg);
+
+/* Cancel an in-progress evdns_getaddrinfo. This MUST NOT be called after the
+ * getaddrinfo's callback has been invoked. The resolves will be canceled,
+ * and the callback will be invoked with the error EVUTIL_EAI_CANCEL. */
+EVENT2_EXPORT_SYMBOL
+void evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *req);
+
+/**
+ Retrieve the address of the 'idx'th configured nameserver.
+
+ @param base The evdns_base to examine.
+ @param idx The index of the nameserver to get the address of.
+ @param sa A location to receive the server's address.
+ @param len The number of bytes available at sa.
+
+ @return the number of bytes written into sa on success. On failure, returns
+ -1 if idx is greater than the number of configured nameservers, or a
+ value greater than 'len' if len was not high enough.
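+
+   As a fragment, checking the first configured nameserver ('base' is an
+   existing evdns_base; <stdio.h> and the platform socket headers are
+   assumed to be included):
+
+   @code
+   struct sockaddr_storage ss;
+   int n = evdns_base_get_nameserver_addr(base, 0,
+       (struct sockaddr *)&ss, sizeof(ss));
+   if (n > 0 && (size_t)n <= sizeof(ss))
+       printf("nameserver 0 uses address family %d\n",
+           ((struct sockaddr *)&ss)->sa_family);
+   @endcode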
+ */
+EVENT2_EXPORT_SYMBOL
+int evdns_base_get_nameserver_addr(struct evdns_base *base, int idx,
+ struct sockaddr *sa, ev_socklen_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !EVENT2_DNS_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/dns_compat.h b/libs/libevent/include/event2/dns_compat.h
new file mode 100644
index 0000000000..965fd65445
--- /dev/null
+++ b/libs/libevent/include/event2/dns_compat.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_DNS_COMPAT_H_INCLUDED_
+#define EVENT2_DNS_COMPAT_H_INCLUDED_
+
+/** @file event2/dns_compat.h
+
+ Potentially non-threadsafe versions of the functions in dns.h: provided
+ only for backwards compatibility.
+
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/**
+ Initialize the asynchronous DNS library.
+
+ This function initializes support for non-blocking name resolution by
+ calling evdns_resolv_conf_parse() on UNIX and
+ evdns_config_windows_nameservers() on Windows.
+
+  @deprecated This function is deprecated because it always uses the current
+  event base, is easily confused by multiple calls to event_init(), and is
+  therefore not safe for multithreaded use. Additionally, it allocates a
+  global structure that only one thread can use. The replacement is
+  evdns_base_new().
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_shutdown()
+ */
+int evdns_init(void);
+
+struct evdns_base;
+/**
+ Return the global evdns_base created by event_init() and used by the other
+ deprecated functions.
+
+ @deprecated This function is deprecated because use of the global
+ evdns_base is error-prone.
+ */
+struct evdns_base *evdns_get_global_base(void);
+
+/**
+ Shut down the asynchronous DNS resolver and terminate all active requests.
+
+ If the 'fail_requests' option is enabled, all active requests will return
+ an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
+ the requests will be silently discarded.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+  function is evdns_base_free().
+
+ @param fail_requests if zero, active requests will be aborted; if non-zero,
+ active requests will return DNS_ERR_SHUTDOWN.
+ @see evdns_init()
+ */
+void evdns_shutdown(int fail_requests);
+
+/**
+ Add a nameserver.
+
+ The address should be an IPv4 address in network byte order.
+ The type of address is chosen so that it matches in_addr.s_addr.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_nameserver_add().
+
+ @param address an IP address in network byte order
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_nameserver_ip_add()
+ */
+int evdns_nameserver_add(unsigned long int address);
+
+/**
+ Get the number of configured nameservers.
+
+ This returns the number of configured nameservers (not necessarily the
+ number of running nameservers). This is useful for double-checking
+ whether our calls to the various nameserver configuration functions
+ have been successful.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_count_nameservers().
+
+ @return the number of configured nameservers
+ @see evdns_nameserver_add()
+ */
+int evdns_count_nameservers(void);
+
+/**
+ Remove all configured nameservers, and suspend all pending resolves.
+
+ Resolves will not necessarily be re-attempted until evdns_resume() is called.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_clear_nameservers_and_suspend().
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resume()
+ */
+int evdns_clear_nameservers_and_suspend(void);
+
+/**
+ Resume normal operation and continue any suspended resolve requests.
+
+ Re-attempt resolves left in limbo after an earlier call to
+ evdns_clear_nameservers_and_suspend().
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_resume().
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_clear_nameservers_and_suspend()
+ */
+int evdns_resume(void);
+
+/**
+ Add a nameserver.
+
+ This wraps the evdns_nameserver_add() function by parsing a string as an IP
+  address and adding it as a nameserver.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_nameserver_ip_add().
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_nameserver_add()
+ */
+int evdns_nameserver_ip_add(const char *ip_as_string);
+
+/**
+ Lookup an A record for a given name.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_resolve_ipv4().
+
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+/**
+  Lookup an AAAA record for a given name.
+
+  @deprecated This function is deprecated because it does not allow the
+  caller to specify which evdns_base it applies to. The recommended
+  function is evdns_base_resolve_ipv6().
+
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+struct in_addr;
+struct in6_addr;
+
+/**
+ Lookup a PTR record for a given IP address.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_resolve_reverse().
+
+ @param in an IPv4 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+/**
+ Lookup a PTR record for a given IPv6 address.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_resolve_reverse_ipv6().
+
+ @param in an IPv6 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+/**
+ Set the value of a configuration option.
+
+ The currently available configuration options are:
+
+ ndots, timeout, max-timeouts, max-inflight, and attempts
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_set_option().
+
+ @param option the name of the configuration option to be modified
+ @param val the value to be set
+ @param flags Ignored.
+ @return 0 if successful, or -1 if an error occurred
+ */
+int evdns_set_option(const char *option, const char *val, int flags);
+
+/**
+ Parse a resolv.conf file.
+
+ The 'flags' parameter determines what information is parsed from the
+ resolv.conf file. See the man page for resolv.conf for the format of this
+ file.
+
+ The following directives are not parsed from the file: sortlist, rotate,
+ no-check-names, inet6, debug.
+
+ If this function encounters an error, the possible return values are: 1 =
+ failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
+ memory, 5 = short read from file, 6 = no nameservers listed in the file
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_resolv_conf_parse().
+
+ @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
+ DNS_OPTIONS_ALL
+ @param filename the path to the resolv.conf file
+ @return 0 if successful, or various positive error codes if an error
+ occurred (see above)
+ @see resolv.conf(3), evdns_config_windows_nameservers()
+ */
+int evdns_resolv_conf_parse(int flags, const char *const filename);
+
+/**
+ Clear the list of search domains.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_search_clear().
+ */
+void evdns_search_clear(void);
+
+/**
+ Add a domain to the list of search domains
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_search_add().
+
+ @param domain the domain to be added to the search list
+ */
+void evdns_search_add(const char *domain);
+
+/**
+ Set the 'ndots' parameter for searches.
+
+ Sets the number of dots which, when found in a name, causes
+ the first query to be without any search domain.
+
+ @deprecated This function is deprecated because it does not allow the
+ caller to specify which evdns_base it applies to. The recommended
+ function is evdns_base_search_ndots_set().
+
+ @param ndots the new ndots parameter
+ */
+void evdns_search_ndots_set(const int ndots);
+
+/**
+  As evdns_add_server_port_with_base(), but without the ability to pick an
+  event_base.
+
+  @deprecated This function is deprecated because it does not allow the
+  caller to specify which event_base it uses. The recommended
+ function is evdns_add_server_port_with_base().
+
+*/
+struct evdns_server_port *evdns_add_server_port(evutil_socket_t socket, int flags, evdns_request_callback_fn_type callback, void *user_data);
+
+#ifdef _WIN32
+int evdns_config_windows_nameservers(void);
+#define EVDNS_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_DNS_COMPAT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/dns_struct.h b/libs/libevent/include/event2/dns_struct.h
new file mode 100644
index 0000000000..593a8a70b6
--- /dev/null
+++ b/libs/libevent/include/event2/dns_struct.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_DNS_STRUCT_H_INCLUDED_
+#define EVENT2_DNS_STRUCT_H_INCLUDED_
+
+/** @file event2/dns_struct.h
+
+ Data structures for dns. Using these structures may hurt forward
+ compatibility with later versions of Libevent: be careful!
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/*
+ * Structures used to implement a DNS server.
+ */
+
+struct evdns_server_request {
+ int flags;
+ int nquestions;
+ struct evdns_server_question **questions;
+};
+struct evdns_server_question {
+ int type;
+#ifdef __cplusplus
+ int dns_question_class;
+#else
+ /* You should refer to this field as "dns_question_class". The
+ * name "class" works in C for backward compatibility, and will be
+ * removed in a future version. (1.5 or later). */
+ int class;
+#define dns_question_class class
+#endif
+ char name[1];
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_DNS_STRUCT_H_INCLUDED_ */
+
diff --git a/libs/libevent/include/event2/event-config.h b/libs/libevent/include/event2/event-config.h
new file mode 100644
index 0000000000..395396efa3
--- /dev/null
+++ b/libs/libevent/include/event2/event-config.h
@@ -0,0 +1,534 @@
+/* event-config.h
+ *
+ * This file was generated by cmake when the makefiles were generated.
+ *
+ * DO NOT EDIT THIS FILE.
+ *
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef EVENT2_EVENT_CONFIG_H_INCLUDED_
+#define EVENT2_EVENT_CONFIG_H_INCLUDED_
+
+/* Numeric representation of the version */
+#define EVENT__NUMERIC_VERSION 0x02010500
+#define EVENT__PACKAGE_VERSION "2.1.5"
+
+#define EVENT__VERSION_MAJOR 2
+#define EVENT__VERSION_MINOR 1
+#define EVENT__VERSION_PATCH 5
+
+/* Version number of package */
+#define EVENT__VERSION "2.1.5-beta"
+
+/* Name of package */
+#define EVENT__PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define EVENT__PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define EVENT__PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define EVENT__PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define EVENT__PACKAGE_TARNAME ""
+
+/* Define if libevent should build without support for a debug mode */
+/* #undef EVENT__DISABLE_DEBUG_MODE */
+
+/* Define if libevent should not allow replacing the mm functions */
+/* #undef EVENT__DISABLE_MM_REPLACEMENT */
+
+/* Define if libevent should not be compiled with thread support */
+/* #undef EVENT__DISABLE_THREAD_SUPPORT */
+
+/* Define to 1 if you have the `accept4' function. */
+/* #undef EVENT__HAVE_ACCEPT4 */
+
+/* Define to 1 if you have the `arc4random' function. */
+/* #undef EVENT__HAVE_ARC4RANDOM */
+
+/* Define to 1 if you have the `arc4random_buf' function. */
+/* #undef EVENT__HAVE_ARC4RANDOM_BUF */
+
+/* Define if clock_gettime is available in libc */
+/* #undef EVENT__DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define if no secure id variant is available */
+/* #undef EVENT__DNS_USE_GETTIMEOFDAY_FOR_ID */
+#define EVENT__DNS_USE_FTIME_FOR_ID
+
+/* Define to 1 if you have the <arpa/inet.h> header file. */
+/* #undef EVENT__HAVE_ARPA_INET_H */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef EVENT__HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if you have the declaration of `CTL_KERN'. */
+/* #undef EVENT__HAVE_DECL_CTL_KERN */
+
+/* Define to 1 if you have the declaration of `KERN_ARND'. */
+/* #undef EVENT__HAVE_DECL_KERN_ARND */
+
+/* Define to 1 if you have the declaration of `KERN_RANDOM'. */
+/* #undef EVENT__HAVE_DECL_KERN_RANDOM */
+
+/* Define if /dev/poll is available */
+/* #undef EVENT__HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <netdb.h> header file. */
+/* #undef EVENT__HAVE_NETDB_H */
+
+/* Define to 1 if fd_mask type is defined */
+/* #undef EVENT__HAVE_FD_MASK */
+
+/* Define to 1 if the <sys/queue.h> header file defines TAILQ_FOREACH. */
+/* #undef EVENT__HAVE_TAILQFOREACH */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef EVENT__HAVE_DLFCN_H */
+
+/* Define if your system supports the epoll system calls */
+/* #undef EVENT__HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_create1' function. */
+/* #undef EVENT__HAVE_EPOLL_CREATE1 */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef EVENT__HAVE_EPOLL_CTL */
+
+/* Define to 1 if you have the `eventfd' function. */
+/* #undef EVENT__HAVE_EVENTFD */
+
+/* Define if your system supports event ports */
+/* #undef EVENT__HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef EVENT__HAVE_FCNTL */
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define EVENT__HAVE_FCNTL_H
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define EVENT__HAVE_GETADDRINFO
+
+/* Define to 1 if you have the `getegid' function. */
+/* #undef EVENT__HAVE_GETEGID */
+
+/* Define to 1 if you have the `geteuid' function. */
+/* #undef EVENT__HAVE_GETEUID */
+
+/* TODO: Check for different gethostbyname_r argument counts. CheckPrototypeDefinition.cmake can be used. */
+/* Define this if you have any gethostbyname_r() */
+/* #undef EVENT__HAVE_GETHOSTBYNAME_R */
+
+/* Define this if gethostbyname_r takes 3 arguments */
+/* #undef EVENT__HAVE_GETHOSTBYNAME_R_3_ARG */
+
+/* Define this if gethostbyname_r takes 5 arguments */
+/* #undef EVENT__HAVE_GETHOSTBYNAME_R_5_ARG */
+
+/* Define this if gethostbyname_r takes 6 arguments */
+/* #undef EVENT__HAVE_GETHOSTBYNAME_R_6_ARG */
+
+/* Define to 1 if you have the `getifaddrs' function. */
+/* #undef EVENT__HAVE_GETIFADDRS */
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define EVENT__HAVE_GETNAMEINFO
+
+/* Define to 1 if you have the `getprotobynumber' function. */
+#define EVENT__HAVE_GETPROTOBYNUMBER
+
+/* Define to 1 if you have the `getservbyname' function. */
+#define EVENT__HAVE_GETSERVBYNAME
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef EVENT__HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the <ifaddrs.h> header file. */
+/* #undef EVENT__HAVE_IFADDRS_H */
+
+/* Define to 1 if you have the `inet_ntop' function. */
+/* #undef EVENT__HAVE_INET_NTOP */
+
+/* Define to 1 if you have the `inet_pton' function. */
+/* #undef EVENT__HAVE_INET_PTON */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define EVENT__HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef EVENT__HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef EVENT__HAVE_KQUEUE */
+
+/* Define if the system has zlib */
+/* #undef EVENT__HAVE_LIBZ */
+
+/* Define to 1 if you have the `mach_absolute_time' function. */
+/* #undef EVENT__HAVE_MACH_ABSOLUTE_TIME */
+
+/* Define to 1 if you have the <mach/mach_time.h> header file. */
+/* #undef EVENT__HAVE_MACH_MACH_TIME_H */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define EVENT__HAVE_MEMORY_H
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef EVENT__HAVE_MMAP */
+
+/* Define to 1 if you have the `nanosleep' function. */
+/* #undef EVENT__HAVE_NANOSLEEP */
+
+/* Define to 1 if you have the `usleep' function. */
+/* #undef EVENT__HAVE_USLEEP */
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN_H */
+
+/* Define to 1 if you have the <netinet/tcp.h> header file. */
+/* #undef EVENT__HAVE_NETINET_TCP_H */
+
+/* Define if the system has openssl */
+#define EVENT__HAVE_OPENSSL
+
+/* Define if the system has zlib */
+/* #undef EVENT__HAVE_ZLIB */
+
+/* Define to 1 if you have the `pipe' function. */
+/* #undef EVENT__HAVE_PIPE */
+
+/* Define to 1 if you have the `pipe2' function. */
+/* #undef EVENT__HAVE_PIPE2 */
+
+/* Define to 1 if you have the `poll' function. */
+/* #undef EVENT__HAVE_POLL */
+
+/* Define to 1 if you have the <poll.h> header file. */
+/* #undef EVENT__HAVE_POLL_H */
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef EVENT__HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef EVENT__HAVE_PORT_H */
+
+/* Define if you have POSIX threads libraries and header files. */
+/* #undef EVENT__HAVE_PTHREAD */
+
+/* Define if we have pthreads on this system */
+/* #undef EVENT__HAVE_PTHREADS */
+
+/* Define to 1 if you have the `putenv' function. */
+#define EVENT__HAVE_PUTENV
+
+/* Define to 1 if the system has the type `sa_family_t'. */
+/* #undef EVENT__HAVE_SA_FAMILY_T */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef EVENT__HAVE_SELECT */
+
+/* Define to 1 if you have the `setenv' function. */
+/* #undef EVENT__HAVE_SETENV */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+/* #undef EVENT__HAVE_SETFD */
+
+/* Define to 1 if you have the `setrlimit' function. */
+/* #undef EVENT__HAVE_SETRLIMIT */
+
+/* Define to 1 if you have the `sendfile' function. */
+/* #undef EVENT__HAVE_SENDFILE */
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef EVENT__HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+/* #undef EVENT__HAVE_SIGNAL */
+
+/* Define to 1 if you have the `splice' function. */
+/* #undef EVENT__HAVE_SPLICE */
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define EVENT__HAVE_STDARG_H
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define EVENT__HAVE_STDDEF_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define EVENT__HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define EVENT__HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+/* #undef EVENT__HAVE_STRINGS_H */
+
+/* Define to 1 if you have the <string.h> header file. */
+#define EVENT__HAVE_STRING_H
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef EVENT__HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+/* #undef EVENT__HAVE_STRSEP */
+
+/* Define to 1 if you have the `strtok_r' function. */
+/* #undef EVENT__HAVE_STRTOK_R */
+
+/* Define to 1 if you have the `strtoll' function. */
+#define EVENT__HAVE_STRTOLL
+
+/* Define to 1 if the system has the type `struct addrinfo'. */
+#define EVENT__HAVE_STRUCT_ADDRINFO
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR
+
+/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
+/* #undef EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16 */
+
+/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
+/* #undef EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32 */
+
+/* Define to 1 if the system has the type `struct sockaddr_in6'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_IN6
+
+/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */
+
+/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
+
+/* Define to 1 if the system has the type `struct sockaddr_storage'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_STORAGE
+
+/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY
+
+/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY */
+
+/* Define to 1 if you have the `sysctl' function. */
+/* #undef EVENT__HAVE_SYSCTL */
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENTFD_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef EVENT__HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+/* #undef EVENT__HAVE_SYS_MMAN_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+/* #undef EVENT__HAVE_SYS_PARAM_H */
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+/* #undef EVENT__HAVE_SYS_QUEUE_H */
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+/* #undef EVENT__HAVE_SYS_RESOURCE_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef EVENT__HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/sendfile.h> header file. */
+/* #undef EVENT__HAVE_SYS_SENDFILE_H */
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+/* #undef EVENT__HAVE_SYS_SOCKET_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define EVENT__HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/sysctl.h> header file. */
+/* #undef EVENT__HAVE_SYS_SYSCTL_H */
+
+/* Define to 1 if you have the <sys/timerfd.h> header file. */
+/* #undef EVENT__HAVE_SYS_TIMERFD_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef EVENT__HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define EVENT__HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <sys/uio.h> header file. */
+/* #undef EVENT__HAVE_SYS_UIO_H */
+
+/* Define to 1 if you have the <sys/wait.h> header file. */
+/* #undef EVENT__HAVE_SYS_WAIT_H */
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+/* #undef EVENT__HAVE_TAILQFOREACH */
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERCLEAR */
+
+/* Define if timercmp is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERCMP */
+
+/* Define to 1 if you have the `timerfd_create' function. */
+/* #undef EVENT__HAVE_TIMERFD_CREATE */
+
+/* Define if timerisset is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERISSET */
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define EVENT__HAVE_UINT8_T
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define EVENT__HAVE_UINT16_T
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define EVENT__HAVE_UINT32_T
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define EVENT__HAVE_UINT64_T
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#define EVENT__HAVE_UINTPTR_T
+
+/* Define to 1 if you have the `umask' function. */
+/* #undef EVENT__HAVE_UMASK */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef EVENT__HAVE_UNISTD_H */
+
+/* Define to 1 if you have the `unsetenv' function. */
+/* #undef EVENT__HAVE_UNSETENV */
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef EVENT__HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef EVENT__HAVE_WORKING_KQUEUE */
+
+#ifdef __USE_UNUSED_DEFINITIONS__
+/* Define to necessary symbol if this constant uses a non-standard name on your system. */
+/* XXX: Hello, this isn't even used, nor is it defined anywhere... - Ellzey */
+#define EVENT__PTHREAD_CREATE_JOINABLE
+#endif
+
+/* The size of `pthread_t', as computed by sizeof. */
+#define EVENT__SIZEOF_PTHREAD_T
+
+/* The size of `int', as computed by sizeof. */
+#define EVENT__SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG_LONG 8
+
+/* The size of `off_t', as computed by sizeof. */
+#define EVENT__SIZEOF_OFF_T 4
+
+#define EVENT__SIZEOF_SSIZE_T
+
+
+/* The size of `short', as computed by sizeof. */
+#define EVENT__SIZEOF_SHORT 2
+
+/* The size of `size_t', as computed by sizeof. */
+#define EVENT__SIZEOF_SIZE_T 4
+
+/* Define to 1 if you have the ANSI C header files. */
+/* #undef EVENT__STDC_HEADERS */
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef EVENT__TIME_WITH_SYS_TIME */
+
+/* The size of `socklen_t', as computed by sizeof. */
+#define EVENT__SIZEOF_SOCKLEN_T 4
+
+/* The size of 'void *', as computed by sizeof */
+#define EVENT__SIZEOF_VOID_P 4
+
+/* set an alias for whatever __func__ or __FUNCTION__ is, what silliness */
+#if defined (__func__)
+#define EVENT____func__ __func__
+#elif defined(__FUNCTION__)
+#define EVENT____func__ __FUNCTION__
+#else
+#define EVENT____func__ __FILE__
+#endif
+
+
+#ifdef __THESE_ARE_NOT_CONFIG_H_THINGS_THEY_ARE_DASH_D_THINGS__
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* Ellzey is not satisfied */
+#define EVENT___FILE_OFFSET_BITS
+
+/* Define for large files, on AIX-style hosts. */
+#define
+#endif
+
+#ifdef _WhAT_DOES_THIS_EVEN_DO_
+/* Define to empty if `const' does not conform to ANSI C. */
+/* lolwut? - ellzey */
+#undef EVENT__const
+#endif
+
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef __cplusplus
+/* why not c++?
+ *
+ * and are we really expected to use EVENT__inline everywhere,
+ * shouldn't we just do:
+ * ifdef EVENT__inline
+ * define inline EVENT__inline
+ *
+ * - Ellzey
+ */
+
+#define EVENT__inline inline
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__pid_t int
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+#define EVENT__size_t unsigned
+
+/* Define to unsigned int if you don't have it */
+#define EVENT__socklen_t socklen_t
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__ssize_t SSIZE_T
+
+/* #undef EVENT__NEED_DLLIMPORT */
+
+/* Define to 1 if you have ERR_remove_thread_state(). */
+/* #undef EVENT__HAVE_ERR_REMOVE_THREAD_STATE */
+
+#endif
diff --git a/libs/libevent/include/event2/event.h b/libs/libevent/include/event2/event.h
new file mode 100644
index 0000000000..6e0a4f04c7
--- /dev/null
+++ b/libs/libevent/include/event2/event.h
@@ -0,0 +1,1675 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_EVENT_H_INCLUDED_
+#define EVENT2_EVENT_H_INCLUDED_
+
+/**
+ @mainpage
+
+ @section intro Introduction
+
+ Libevent is an event notification library for developing scalable network
+ servers. The Libevent API provides a mechanism to execute a callback
+ function when a specific event occurs on a file descriptor or after a
+  timeout has been reached. Furthermore, Libevent also supports callbacks
+  triggered by signals or regular timeouts.
+
+  Libevent is meant to replace the event loop found in event-driven network
+ servers. An application just needs to call event_base_dispatch() and then add or
+ remove events dynamically without having to change the event loop.
+
+
+ Currently, Libevent supports /dev/poll, kqueue(2), select(2), poll(2),
+ epoll(4), and evports. The internal event mechanism is completely
+ independent of the exposed event API, and a simple update of Libevent can
+  provide new functionality without having to redesign applications. As a
+ result, Libevent allows for portable application development and provides
+ the most scalable event notification mechanism available on an operating
+ system. Libevent can also be used for multithreaded programs. Libevent
+  should compile on Linux, *BSD, Mac OS X, Solaris, and Windows.
+
+ @section usage Standard usage
+
+ Every program that uses Libevent must include the <event2/event.h>
+ header, and pass the -levent flag to the linker. (You can instead link
+ -levent_core if you only want the main event and buffered IO-based code,
+ and don't want to link any protocol code.)
+
+ @section setup Library setup
+
+ Before you call any other Libevent functions, you need to set up the
+ library. If you're going to use Libevent from multiple threads in a
+ multithreaded application, you need to initialize thread support --
+ typically by using evthread_use_pthreads() or
+ evthread_use_windows_threads(). See <event2/thread.h> for more
+ information.
+
+ This is also the point where you can replace Libevent's memory
+ management functions with event_set_mem_functions, and enable debug mode
+ with event_enable_debug_mode().
+
+ @section base Creating an event base
+
+ Next, you need to create an event_base structure, using event_base_new()
+ or event_base_new_with_config(). The event_base is responsible for
+ keeping track of which events are "pending" (that is to say, being
+ watched to see if they become active) and which events are "active".
+ Every event is associated with a single event_base.
+
+ @section event Event notification
+
+ For each file descriptor that you wish to monitor, you must create an
+ event structure with event_new(). (You may also declare an event
+ structure and call event_assign() to initialize the members of the
+ structure.) To enable notification, you add the structure to the list
+ of monitored events by calling event_add(). The event structure must
+ remain allocated as long as it is active, so it should generally be
+ allocated on the heap.
+
+ @section loop Dispatching events.
+
+ Finally, you call event_base_dispatch() to loop and dispatch events.
+ You can also use event_base_loop() for more fine-grained control.
+
+ Currently, only one thread can be dispatching a given event_base at a
+ time. If you want to run events in multiple threads at once, you can
+ either have a single event_base whose events add work to a work queue,
+ or you can create multiple event_base objects.
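+
+  As a compressed sketch of the flow above (illustrative only: the helper
+  names are not part of the API, 'fd' is assumed to be an existing connected
+  socket, the platform socket headers are assumed to be included, and error
+  checking is omitted):
+
+  @code
+  #include <event2/event.h>
+
+  static void
+  read_cb(evutil_socket_t fd, short what, void *arg)
+  {
+      char buf[1024];
+      if (recv(fd, buf, sizeof(buf), 0) <= 0)
+          event_base_loopexit((struct event_base *)arg, NULL);
+  }
+
+  static void
+  run(evutil_socket_t fd)
+  {
+      struct event_base *base = event_base_new();
+      struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, read_cb, base);
+      event_add(ev, NULL);
+      event_base_dispatch(base);
+      event_free(ev);
+      event_base_free(base);
+  }
+  @endcode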
+
+ @section bufferevent I/O Buffers
+
+ Libevent provides a buffered I/O abstraction on top of the regular event
+ callbacks. This abstraction is called a bufferevent. A bufferevent
+ provides input and output buffers that get filled and drained
+ automatically. The user of a buffered event no longer deals directly
+ with the I/O, but instead is reading from input and writing to output
+ buffers.
+
+ Once initialized via bufferevent_socket_new(), the bufferevent structure
+ can be used repeatedly with bufferevent_enable() and
+ bufferevent_disable(). Instead of reading and writing directly to a
+ socket, you would call bufferevent_read() and bufferevent_write().
+
+  When reading is enabled, the bufferevent will try to read from the file descriptor
+ and call the read callback. The write callback is executed whenever the
+ output buffer is drained below the write low watermark, which is 0 by
+ default.
+
+ See <event2/bufferevent*.h> for more information.
+
+ @section timers Timers
+
+ Libevent can also be used to create timers that invoke a callback after a
+ certain amount of time has expired. The evtimer_new() macro returns
+ an event struct to use as a timer. To activate the timer, call
+ evtimer_add(). Timers can be deactivated by calling evtimer_del().
+ (These macros are thin wrappers around event_new(), event_add(),
+ and event_del(); you can also use those instead.)
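+
+  For instance (a sketch, assuming 'base' is an existing event_base and
+  <stdio.h> is included; the helper names are only for illustration):
+
+  @code
+  static void
+  timer_cb(evutil_socket_t fd, short what, void *arg)
+  {
+      puts("timer fired");
+  }
+
+  static void
+  add_timer(struct event_base *base)
+  {
+      struct timeval one_sec = { 1, 0 };
+      struct event *timer = evtimer_new(base, timer_cb, NULL);
+      evtimer_add(timer, &one_sec);
+  }
+  @endcode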
+
+ @section evdns Asynchronous DNS resolution
+
+ Libevent provides an asynchronous DNS resolver that should be used instead
+ of the standard DNS resolver functions. See the <event2/dns.h>
+ functions for more detail.
+
+ @section evhttp Event-driven HTTP servers
+
+ Libevent provides a very simple event-driven HTTP server that can be
+ embedded in your program and used to service HTTP requests.
+
+ To use this capability, you need to include the <event2/http.h> header in your
+ program. See that header for more information.
+
+ @section evrpc A framework for RPC servers and clients
+
+ Libevent provides a framework for creating RPC servers and clients. It
+ takes care of marshaling and unmarshaling all data structures.
+
+ @section api API Reference
+
+ To browse the complete documentation of the libevent API, click on any of
+ the following links.
+
+ event2/event.h
+ The primary libevent header
+
+ event2/thread.h
+ Functions for use by multithreaded programs
+
+ event2/buffer.h and event2/bufferevent.h
+ Buffer management for network reading and writing
+
+ event2/util.h
+ Utility functions for portable nonblocking network code
+
+ event2/dns.h
+ Asynchronous DNS resolution
+
+ event2/http.h
+ An embedded libevent-based HTTP server
+
+ event2/rpc.h
+ A framework for creating RPC servers and clients
+
+ */
+
+/** @file event2/event.h
+
+ Core functions for waiting for and receiving events, and using event bases.
+*/
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <stdio.h>
+
+/* For int types. */
+#include <event2/util.h>
+
+/**
+ * Structure to hold information and state for a Libevent dispatch loop.
+ *
+ * The event_base lies at the center of Libevent; every application will
+ * have one. It keeps track of all pending and active events, and
+ * notifies your application of the active ones.
+ *
+ * This is an opaque structure; you can allocate one using
+ * event_base_new() or event_base_new_with_config().
+ *
+ * @see event_base_new(), event_base_free(), event_base_loop(),
+ * event_base_new_with_config()
+ */
+struct event_base
+#ifdef EVENT_IN_DOXYGEN_
+{/*Empty body so that doxygen will generate documentation here.*/}
+#endif
+;
+
+/**
+ * @struct event
+ *
+ * Structure to represent a single event.
+ *
+ * An event can have some underlying condition it represents: a socket
+ * becoming readable or writeable (or both), or a signal being raised.
+ * (An event that represents no underlying condition is still useful: you
+ * can use one to implement a timer, or to communicate between threads.)
+ *
+ * Generally, you can create events with event_new(), then make them
+ * pending with event_add(). As your event_base runs, it will run the
+ * callbacks of any events whose conditions are triggered.  When you no
+ * longer want the event, free it with event_free().
+ *
+ * In more depth:
+ *
+ * An event may be "pending" (one whose condition we are watching),
+ * "active" (one whose condition has triggered and whose callback is about
+ * to run), neither, or both. Events come into existence via
+ * event_assign() or event_new(), and are then neither active nor pending.
+ *
+ * To make an event pending, pass it to event_add(). When doing so, you
+ * can also set a timeout for the event.
+ *
+ * Events become active during an event_base_loop() call when either their
+ * condition has triggered, or when their timeout has elapsed. You can
+ * also activate an event manually using event_active().  The event_base
+ * loop will run the callbacks of active events; after it has done so, it
+ * marks them as no longer active.
+ *
+ * You can make an event non-pending by passing it to event_del(). This
+ * also makes the event non-active.
+ *
+ * Events can be "persistent" or "non-persistent". A non-persistent event
+ * becomes non-pending as soon as it is triggered: thus, it only runs at
+ * most once per call to event_add(). A persistent event remains pending
+ * even when it becomes active: you'll need to event_del() it manually in
+ * order to make it non-pending. When a persistent event with a timeout
+ * becomes active, its timeout is reset: this means you can use persistent
+ * events to implement periodic timeouts.
+ *
+ * This should be treated as an opaque structure; you should never read or
+ * write any of its fields directly. For backward compatibility with old
+ * code, it is defined in the event2/event_struct.h header; including this
+ * header may make your code incompatible with other versions of Libevent.
+ *
+ * @see event_new(), event_free(), event_assign(), event_get_assignment(),
+ * event_add(), event_del(), event_active(), event_pending(),
+ * event_get_fd(), event_get_base(), event_get_events(),
+ * event_get_callback(), event_get_callback_arg(),
+ * event_priority_set()
+ */
+struct event
+#ifdef EVENT_IN_DOXYGEN_
+{/*Empty body so that doxygen will generate documentation here.*/}
+#endif
+;
+
+/**
+ * Configuration for an event_base.
+ *
+ * There are many options that can be used to alter the behavior and
+ * implementation of an event_base. To avoid having to pass them all in a
+ * complex many-argument constructor, we provide an abstract data type
+ * where you set up configuration information before passing it to
+ * event_base_new_with_config().
+ *
+ * @see event_config_new(), event_config_free(), event_base_new_with_config(),
+ * event_config_avoid_method(), event_config_require_features(),
+ * event_config_set_flag(), event_config_set_num_cpus_hint()
+ */
+struct event_config
+#ifdef EVENT_IN_DOXYGEN_
+{/*Empty body so that doxygen will generate documentation here.*/}
+#endif
+;
+
+/**
+ * Enable some relatively expensive debugging checks in Libevent that
+ * would normally be turned off. Generally, these checks cause code that
+ * would otherwise crash mysteriously to fail earlier with an assertion
+ * failure. Note that this method MUST be called before any events or
+ * event_bases have been created.
+ *
+ * Debug mode can currently catch the following errors:
+ * An event is re-assigned while it is added
+ * Any function is called on a non-assigned event
+ *
+ * Note that debugging mode uses memory to track every event that has been
+ * initialized (via event_assign, event_set, or event_new) but not yet
+ * released (via event_free or event_debug_unassign). If you want to use
+ * debug mode, and you find yourself running out of memory, you will need
+ * to use event_debug_unassign to explicitly stop tracking events that
+ * are no longer considered set-up.
+ *
+ * @see event_debug_unassign()
+ */
+EVENT2_EXPORT_SYMBOL
+void event_enable_debug_mode(void);
+
+/**
+ * When debugging mode is enabled, informs Libevent that an event should no
+ * longer be considered as assigned. When debugging mode is not enabled, does
+ * nothing.
+ *
+ * This function must only be called on a non-added event.
+ *
+ * @see event_enable_debug_mode()
+ */
+EVENT2_EXPORT_SYMBOL
+void event_debug_unassign(struct event *);
+
+/**
+ * Create and return a new event_base to use with the rest of Libevent.
+ *
+ * @return a new event_base on success, or NULL on failure.
+ *
+ * @see event_base_free(), event_base_new_with_config()
+ */
+EVENT2_EXPORT_SYMBOL
+struct event_base *event_base_new(void);
+
+/**
+ Reinitialize the event base after a fork
+
+ Some event mechanisms do not survive across fork. The event base needs
+ to be reinitialized with the event_reinit() function.
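+
+ For example (a sketch for POSIX systems; error handling omitted):
+ <pre>
+   if (fork() == 0) {
+       // child process: the backend state must be rebuilt
+       event_reinit(base);
+   }
+ </pre>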
+
+ @param base the event base that needs to be re-initialized
+ @return 0 if successful, or -1 if some events could not be re-added.
+ @see event_base_new()
+*/
+EVENT2_EXPORT_SYMBOL
+int event_reinit(struct event_base *base);
+
+/**
+ Event dispatching loop
+
+ This loop will run the event base until either there are no more pending or
+ active events, or until something calls event_base_loopbreak() or
+ event_base_loopexit().
+
+ @param base the event_base structure returned by event_base_new() or
+ event_base_new_with_config()
+ @return 0 if successful, -1 if an error occurred, or 1 if we exited because
+ no events were pending or active.
+ @see event_base_loop()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_dispatch(struct event_base *);
+
+/**
+ Get the kernel event notification mechanism used by Libevent.
+
+ @param eb the event_base structure returned by event_base_new()
+ @return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
+ */
+EVENT2_EXPORT_SYMBOL
+const char *event_base_get_method(const struct event_base *);
+
+/**
+ Gets all event notification mechanisms supported by Libevent.
+
+ This function returns the event mechanisms in the order preferred by
+ Libevent. Note that this list will include all backends that
+ Libevent has compiled-in support for, and will not necessarily check
+ your OS to see whether it has the required resources.
+
+ @return an array with pointers to the names of supported methods.
+ The end of the array is indicated by a NULL pointer. If an
+ error is encountered NULL is returned.
+*/
+EVENT2_EXPORT_SYMBOL
+const char **event_get_supported_methods(void);
+
+/** Query the current monotonic time from the timer for a struct
+ * event_base.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_gettime_monotonic(struct event_base *base, struct timeval *tp);
+
+/**
+ @name event type flag
+
+ Flags to pass to event_base_get_num_events() to specify the kinds of events
+ we want to aggregate counts for
+*/
+/**@{*/
+/** count the number of active events, which have been triggered.*/
+#define EVENT_BASE_COUNT_ACTIVE 1U
+/** count the number of virtual events, which is used to represent an internal
+ * condition, other than a pending event, that keeps the loop from exiting. */
+#define EVENT_BASE_COUNT_VIRTUAL 2U
+/** count the number of events which have been added to event base, including
+ * internal events. */
+#define EVENT_BASE_COUNT_ADDED 4U
+/**@}*/
+
+/**
+ Gets the number of events in event_base, as specified in the flags.
+
+ Since event base has some internal events added to make some of its
+ functionalities work, EVENT_BASE_COUNT_ADDED may return more than the
+ number of events you added using event_add().
+
+ If you pass EVENT_BASE_COUNT_ACTIVE and EVENT_BASE_COUNT_ADDED together, an
+ active event will be counted twice. However, this might not be the case in
+ future libevent versions. The return value is an indication of the work
+ load, but the user shouldn't rely on the exact value as this may change in
+ the future.
+
+ @param eb the event_base structure returned by event_base_new()
+ @param flags a bitwise combination of the kinds of events to aggregate
+ counts for
+ @return the number of events specified in the flags
+*/
+EVENT2_EXPORT_SYMBOL
+int event_base_get_num_events(struct event_base *, unsigned int);
+
+/**
+ Get the maximum number of events in a given event_base as specified in the
+ flags.
+
+ @param eb the event_base structure returned by event_base_new()
+ @param flags a bitwise combination of the kinds of events to aggregate
+ counts for
+ @param clear option used to reset the maximum count.
+ @return the number of events specified in the flags
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_get_max_events(struct event_base *, unsigned int, int);
+
+/**
+ Allocates a new event configuration object.
+
+ The event configuration object can be used to change the behavior of
+ an event base.
+
+ @return an event_config object that can be used to store configuration, or
+ NULL if an error is encountered.
+ @see event_base_new_with_config(), event_config_free(), event_config
+*/
+EVENT2_EXPORT_SYMBOL
+struct event_config *event_config_new(void);
+
+/**
+ Deallocates all memory associated with an event configuration object
+
+ @param cfg the event configuration object to be freed.
+*/
+EVENT2_EXPORT_SYMBOL
+void event_config_free(struct event_config *cfg);
+
+/**
+ Enters an event method that should be avoided into the configuration.
+
+ This can be used to avoid event mechanisms that do not support certain
+ file descriptor types, or for debugging to avoid certain event
+ mechanisms. An application can make use of multiple event bases to
+ accommodate incompatible file descriptor types.
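+
+ For example (a minimal sketch; avoiding "select" here is purely
+ illustrative):
+ <pre>
+   struct event_config *cfg = event_config_new();
+   event_config_avoid_method(cfg, "select");
+   struct event_base *base = event_base_new_with_config(cfg);
+   event_config_free(cfg);
+ </pre>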
+
+ @param cfg the event configuration object
+ @param method the name of the event method to avoid
+ @return 0 on success, -1 on failure.
+*/
+EVENT2_EXPORT_SYMBOL
+int event_config_avoid_method(struct event_config *cfg, const char *method);
+
+/**
+ A flag used to describe which features an event_base (must) provide.
+
+ Because of OS limitations, not every Libevent backend supports every
+ possible feature. You can use this type with
+ event_config_require_features() to tell Libevent to only proceed if your
+ event_base implements a given feature, and you can receive this type from
+ event_base_get_features() to see which features are available.
+*/
+enum event_method_feature {
+ /** Require an event method that allows edge-triggered events with EV_ET. */
+ EV_FEATURE_ET = 0x01,
+ /** Require an event method where having one event triggered among
+ * many is [approximately] an O(1) operation. This excludes (for
+ * example) select and poll, which are approximately O(N) for N
+ * equal to the total number of possible events. */
+ EV_FEATURE_O1 = 0x02,
+ /** Require an event method that allows file descriptors as well as
+ * sockets. */
+ EV_FEATURE_FDS = 0x04,
+ /** Require an event method that allows you to use EV_CLOSED to detect
+ * connection close without the necessity of reading all the pending data.
+ *
+ * Methods that do support EV_CLOSED may not be able to provide support on
+ * all kernel versions.
+ **/
+ EV_FEATURE_EARLY_CLOSE = 0x08
+};
+
+/**
+ A flag passed to event_config_set_flag().
+
+ These flags change the behavior of an allocated event_base.
+
+ @see event_config_set_flag(), event_base_new_with_config(),
+ event_method_feature
+ */
+enum event_base_config_flag {
+ /** Do not allocate a lock for the event base, even if we have
+ locking set up.
+
+ Setting this option will make it unsafe and nonfunctional to call
+ functions on the base concurrently from multiple threads.
+ */
+ EVENT_BASE_FLAG_NOLOCK = 0x01,
+ /** Do not check the EVENT_* environment variables when configuring
+ an event_base */
+ EVENT_BASE_FLAG_IGNORE_ENV = 0x02,
+ /** Windows only: enable the IOCP dispatcher at startup
+
+ If this flag is set then bufferevent_socket_new() and
+ evconnlistener_new() will use IOCP-backed implementations
+ instead of the usual select-based one on Windows.
+ */
+ EVENT_BASE_FLAG_STARTUP_IOCP = 0x04,
+ /** Instead of checking the current time every time the event loop is
+ ready to run timeout callbacks, check after each timeout callback.
+ */
+ EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08,
+
+ /** If we are using the epoll backend, this flag says that it is
+ safe to use Libevent's internal change-list code to batch up
+ adds and deletes in order to try to do as few syscalls as
+ possible. Setting this flag can make your code run faster, but
+ it may trigger a Linux bug: it is not safe to use this flag
+ if you have any fds cloned by dup() or its variants. Doing so
+ will produce strange and hard-to-diagnose bugs.
+
+ This flag can also be activated by setting the
+ EVENT_EPOLL_USE_CHANGELIST environment variable.
+
+ This flag has no effect if you wind up using a backend other than
+ epoll.
+ */
+ EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST = 0x10,
+
+ /** Ordinarily, Libevent implements its time and timeout code using
+ the fastest monotonic timer that we have. If this flag is set,
+ however, we use a less efficient but more precise timer, assuming one is
+ present.
+ */
+ EVENT_BASE_FLAG_PRECISE_TIMER = 0x20
+};
+
+/**
+ Return a bitmask of the features implemented by an event base. This
+ will be a bitwise OR of one or more of the values of
+ event_method_feature
+
+ @see event_method_feature
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_get_features(const struct event_base *base);
+
+/**
+ Enters a required event method feature that the application demands.
+
+ Note that not every feature or combination of features is supported
+ on every platform. Code that requests features should be prepared
+ to handle the case where event_base_new_with_config() returns NULL, as in:
+ <pre>
+ event_config_require_features(cfg, EV_FEATURE_ET);
+ base = event_base_new_with_config(cfg);
+ if (base == NULL) {
+ // We can't get edge-triggered behavior here.
+ event_config_require_features(cfg, 0);
+ base = event_base_new_with_config(cfg);
+ }
+ </pre>
+
+ @param cfg the event configuration object
+ @param feature a bitfield of one or more event_method_feature values.
+ Replaces values from previous calls to this function.
+ @return 0 on success, -1 on failure.
+ @see event_method_feature, event_base_new_with_config()
+*/
+EVENT2_EXPORT_SYMBOL
+int event_config_require_features(struct event_config *cfg, int feature);
+
+/**
+ * Sets one or more flags to configure what parts of the eventual event_base
+ * will be initialized, and how they'll work.
+ *
+ * @see event_base_config_flags, event_base_new_with_config()
+ **/
+EVENT2_EXPORT_SYMBOL
+int event_config_set_flag(struct event_config *cfg, int flag);
+
+/**
+ * Records a hint for the number of CPUs in the system. This is used for
+ * tuning thread pools, etc, for optimal performance. In Libevent 2.0,
+ * it is only on Windows, and only when IOCP is in use.
+ *
+ * @param cfg the event configuration object
+ * @param cpus the number of cpus
+ * @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_config_set_num_cpus_hint(struct event_config *cfg, int cpus);
+
+/**
+ * Record an interval and/or a number of callbacks after which the event base
+ * should check for new events. By default, the event base will run as many
+ * events as are activated at the highest activated priority before checking
+ * for new events. If you configure it by setting max_interval, it will check
+ * the time after each callback, and not allow more than max_interval to
+ * elapse before checking for new events. If you configure it by setting
+ * max_callbacks to a value >= 0, it will run no more than max_callbacks
+ * callbacks before checking for new events.
+ *
+ * This option can decrease the latency of high-priority events, and
+ * avoid priority inversions where multiple low-priority events keep us from
+ * polling for high-priority events, but at the expense of slightly decreasing
+ * the throughput. Use it with caution!
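+ *
+ * For example (a minimal sketch; cfg is an event_config you created with
+ * event_config_new(), and the 50 msec / 16 callback limits are arbitrary):
+ * <pre>
+ *   struct timeval fifty_msec = { 0, 50000 };
+ *   event_config_set_max_dispatch_interval(cfg, &fifty_msec, 16, 0);
+ * </pre>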
+ *
+ * @param cfg The event_base configuration object.
+ * @param max_interval An interval after which Libevent should stop running
+ * callbacks and check for more events, or NULL if there should be
+ * no such interval.
+ * @param max_callbacks A number of callbacks after which Libevent should
+ * stop running callbacks and check for more events, or -1 if there
+ * should be no such limit.
+ * @param min_priority A priority below which max_interval and max_callbacks
+ * should not be enforced. If this is set to 0, they are enforced
+ * for events of every priority; if it's set to 1, they're enforced
+ * for events of priority 1 and above, and so on.
+ * @return 0 on success, -1 on failure.
+ **/
+EVENT2_EXPORT_SYMBOL
+int event_config_set_max_dispatch_interval(struct event_config *cfg,
+ const struct timeval *max_interval, int max_callbacks,
+ int min_priority);
+
+/**
+ Initialize the event API.
+
+ Use event_base_new_with_config() to initialize a new event base, taking
+ the specified configuration under consideration. The configuration object
+ can currently be used to avoid certain event notification mechanisms.
+
+ @param cfg the event configuration object
+ @return an initialized event_base that can be used to register events,
+ or NULL if no event base can be created with the requested event_config.
+ @see event_base_new(), event_base_free(), event_init(), event_assign()
+*/
+EVENT2_EXPORT_SYMBOL
+struct event_base *event_base_new_with_config(const struct event_config *);
+
+/**
+ Deallocate all memory associated with an event_base, and free the base.
+
+ Note that this function will not close any fds or free any memory passed
+ to event_new() as the callback argument.
+
+ If there are any pending finalizer callbacks, this function will invoke
+ them.
+
+ @param eb an event_base to be freed
+ */
+EVENT2_EXPORT_SYMBOL
+void event_base_free(struct event_base *);
+
+/**
+ As event_base_free(), but do not run finalizers.
+
+ THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ BECOMES STABLE.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_base_free_nofinalize(struct event_base *);
+
+/** @name Log severities
+ */
+/**@{*/
+#define EVENT_LOG_DEBUG 0
+#define EVENT_LOG_MSG 1
+#define EVENT_LOG_WARN 2
+#define EVENT_LOG_ERR 3
+/**@}*/
+
+/* Obsolete names: these are deprecated, but older programs might use them.
+ * They violate the reserved-identifier namespace. */
+#define _EVENT_LOG_DEBUG EVENT_LOG_DEBUG
+#define _EVENT_LOG_MSG EVENT_LOG_MSG
+#define _EVENT_LOG_WARN EVENT_LOG_WARN
+#define _EVENT_LOG_ERR EVENT_LOG_ERR
+
+/**
+ A callback function used to intercept Libevent's log messages.
+
+ @see event_set_log_callback
+ */
+typedef void (*event_log_cb)(int severity, const char *msg);
+/**
+ Redirect Libevent's log messages.
+
+ @param cb a function taking two arguments: an integer severity between
+ EVENT_LOG_DEBUG and EVENT_LOG_ERR, and a string. If cb is NULL,
+ then the default log is used.
+
+ NOTE: The function you provide *must not* call any other libevent
+ functionality. Doing so can produce undefined behavior.
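+
+ For example (a minimal sketch; writing to stderr is illustrative, and is
+ safe because it calls no libevent functions):
+ <pre>
+   static void write_to_stderr(int severity, const char *msg)
+   {
+       fprintf(stderr, "[libevent severity %d] %s\n", severity, msg);
+   }
+
+   // somewhere during program start-up:
+   event_set_log_callback(write_to_stderr);
+ </pre>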
+ */
+EVENT2_EXPORT_SYMBOL
+void event_set_log_callback(event_log_cb cb);
+
+/**
+ A function to be called if Libevent encounters a fatal internal error.
+
+ @see event_set_fatal_callback
+ */
+typedef void (*event_fatal_cb)(int err);
+
+/**
+ Override Libevent's behavior in the event of a fatal internal error.
+
+ By default, Libevent will call exit(1) if a programming error makes it
+ impossible to continue correct operation. This function allows you to supply
+ another callback instead. Note that if the function is ever invoked,
+ something is wrong with your program, or with Libevent: any subsequent calls
+ to Libevent may result in undefined behavior.
+
+ Libevent will (almost) always log an EVENT_LOG_ERR message before calling
+ this function; look at the last log message to see why Libevent has died.
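+
+ For example (a minimal sketch; aborting is one reasonable policy, not the
+ only one):
+ <pre>
+   static void die_on_fatal_error(int err)
+   {
+       fprintf(stderr, "libevent fatal error %d\n", err);
+       abort();
+   }
+
+   event_set_fatal_callback(die_on_fatal_error);
+ </pre>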
+ */
+EVENT2_EXPORT_SYMBOL
+void event_set_fatal_callback(event_fatal_cb cb);
+
+#define EVENT_DBG_ALL 0xffffffffu
+#define EVENT_DBG_NONE 0
+
+/**
+ Turn on debugging logs and have them sent to the default log handler.
+
+ This is a global setting; if you are going to call it, you must call this
+ before any calls that create an event-base. You must call it before any
+ multithreaded use of Libevent.
+
+ Debug logs are verbose.
+
+ @param which Controls which debug messages are turned on. This option is
+ unused for now; for forward compatibility, you must pass in the constant
+ "EVENT_DBG_ALL" to turn debugging logs on, or "EVENT_DBG_NONE" to turn
+ debugging logs off.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_enable_debug_logging(ev_uint32_t which);
+
+/**
+ Associate a different event base with an event.
+
+ The event to be associated must not be currently active or pending.
+
+ @param eb the event base
+ @param ev the event
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_set(struct event_base *, struct event *);
+
+/** @name Loop flags
+
+ These flags control the behavior of event_base_loop().
+ */
+/**@{*/
+/** Block until we have an active event, then exit once all active events
+ * have had their callbacks run. */
+#define EVLOOP_ONCE 0x01
+/** Do not block: see which events are ready now, run the callbacks
+ * of the highest-priority ones, then exit. */
+#define EVLOOP_NONBLOCK 0x02
+/** Do not exit the loop because we have no pending events. Instead, keep
+ * running until event_base_loopexit() or event_base_loopbreak() makes us
+ * stop.
+ */
+#define EVLOOP_NO_EXIT_ON_EMPTY 0x04
+/**@}*/
+
+/**
+ Wait for events to become active, and run their callbacks.
+
+ This is a more flexible version of event_base_dispatch().
+
+ By default, this loop will run the event base until either there are no more
+ pending or active events, or until something calls event_base_loopbreak() or
+ event_base_loopexit(). You can override this behavior with the 'flags'
+ argument.
+
+ @param eb the event_base structure returned by event_base_new() or
+ event_base_new_with_config()
+ @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
+ @return 0 if successful, -1 if an error occurred, or 1 if we exited because
+ no events were pending or active.
+ @see event_base_loopexit(), event_base_dispatch(), EVLOOP_ONCE,
+ EVLOOP_NONBLOCK
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_loop(struct event_base *, int);
+
+/**
+ Exit the event loop after the specified time
+
+ The next event_base_loop() iteration after the given timer expires will
+ complete normally (handling all queued events) then exit without
+ blocking for events again.
+
+ Subsequent invocations of event_base_loop() will proceed normally.
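+
+ For example (a minimal sketch that runs the loop for roughly ten seconds):
+ <pre>
+   struct timeval ten_sec = { 10, 0 };
+   event_base_loopexit(base, &ten_sec);
+   event_base_dispatch(base);
+ </pre>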
+
+ @param eb the event_base structure returned by event_init()
+ @param tv the amount of time after which the loop should terminate,
+ or NULL to exit after running all currently active events.
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_loopbreak()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_loopexit(struct event_base *, const struct timeval *);
+
+/**
+ Abort the active event_base_loop() immediately.
+
+ event_base_loop() will abort the loop after the next event is completed;
+ event_base_loopbreak() is typically invoked from this event's callback.
+ This behavior is analogous to the "break;" statement.
+
+ Subsequent invocations of event_base_loop() will proceed normally.
+
+ @param eb the event_base structure returned by event_init()
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_loopexit()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_loopbreak(struct event_base *);
+
+/**
+ Tell the active event_base_loop() to scan for new events immediately.
+
+ Calling this function makes the currently active event_base_loop()
+ start the loop over again (scanning for new events) after the current
+ event callback finishes. If the event loop is not running, this
+ function has no effect.
+
+ event_base_loopcontinue() is typically invoked from this event's callback.
+ This behavior is analogous to the "continue;" statement.
+
+ Subsequent invocations of the event loop will proceed normally.
+
+ @param eb the event_base structure returned by event_init()
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_loopbreak()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_loopcontinue(struct event_base *);
+
+/**
+ Checks if the event loop was told to exit by event_base_loopexit().
+
+ This function will return true for an event_base at every point after
+ event_base_loopexit() is called, until the event loop is next entered.
+
+ @param eb the event_base structure returned by event_init()
+ @return true if event_base_loopexit() was called on this event base,
+ or 0 otherwise
+ @see event_base_loopexit()
+ @see event_base_got_break()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_got_exit(struct event_base *);
+
+/**
+ Checks if the event loop was told to abort immediately by event_base_loopbreak().
+
+ This function will return true for an event_base at every point after
+ event_base_loopbreak() is called, until the event loop is next entered.
+
+ @param eb the event_base structure returned by event_init()
+ @return true if event_base_loopbreak() was called on this event base,
+ or 0 otherwise
+ @see event_base_loopbreak()
+ @see event_base_got_exit()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_got_break(struct event_base *);
+
+/**
+ * @name event flags
+ *
+ * Flags to pass to event_new(), event_assign(), event_pending(), and
+ * anything else with an argument of the form "short events"
+ */
+/**@{*/
+/** Indicates that a timeout has occurred. It's not necessary to pass
+ * this flag to event_new()/event_assign() to get a timeout. */
+#define EV_TIMEOUT 0x01
+/** Wait for a socket or FD to become readable */
+#define EV_READ 0x02
+/** Wait for a socket or FD to become writeable */
+#define EV_WRITE 0x04
+/** Wait for a POSIX signal to be raised*/
+#define EV_SIGNAL 0x08
+/**
+ * Persistent event: won't get removed automatically when activated.
+ *
+ * When a persistent event with a timeout becomes activated, its timeout
+ * is reset to 0.
+ */
+#define EV_PERSIST 0x10
+/** Select edge-triggered behavior, if supported by the backend. */
+#define EV_ET 0x20
+/**
+ * If this option is provided, then event_del() will not block in one thread
+ * while waiting for the event callback to complete in another thread.
+ *
+ * To use this option safely, you may need to use event_finalize() or
+ * event_free_finalize() in order to safely tear down an event in a
+ * multithreaded application. See those functions for more information.
+ *
+ * THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ * BECOMES STABLE.
+ **/
+#define EV_FINALIZE 0x40
+/**
+ * Detects connection close events. You can use this to detect when a
+ * connection has been closed, without having to read all the pending data
+ * from a connection.
+ *
+ * Not all backends support EV_CLOSED. To detect or require it, use the
+ * feature flag EV_FEATURE_EARLY_CLOSE.
+ **/
+#define EV_CLOSED 0x80
+/**@}*/
+
+/**
+ @name evtimer_* macros
+
+ Aliases for working with one-shot timer events */
+/**@{*/
+#define evtimer_assign(ev, b, cb, arg) \
+ event_assign((ev), (b), -1, 0, (cb), (arg))
+#define evtimer_new(b, cb, arg) event_new((b), -1, 0, (cb), (arg))
+#define evtimer_add(ev, tv) event_add((ev), (tv))
+#define evtimer_del(ev) event_del(ev)
+#define evtimer_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv))
+#define evtimer_initialized(ev) event_initialized(ev)
+/**@}*/
+
+/**
+ @name evsignal_* macros
+
+ Aliases for working with signal events
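+
+ For example (a minimal sketch; on_sigint is your own callback and SIGINT
+ comes from signal.h):
+ <pre>
+   struct event *sigev = evsignal_new(base, SIGINT, on_sigint, NULL);
+   evsignal_add(sigev, NULL);
+ </pre>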
+ */
+/**@{*/
+#define evsignal_add(ev, tv) event_add((ev), (tv))
+#define evsignal_assign(ev, b, x, cb, arg) \
+ event_assign((ev), (b), (x), EV_SIGNAL|EV_PERSIST, cb, (arg))
+#define evsignal_new(b, x, cb, arg) \
+ event_new((b), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
+#define evsignal_del(ev) event_del(ev)
+#define evsignal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv))
+#define evsignal_initialized(ev) event_initialized(ev)
+/**@}*/
+
+/**
+ A callback function for an event.
+
+ It receives three arguments:
+
+ @param fd An fd or signal
+ @param events One or more EV_* flags
+ @param arg A user-supplied argument.
+
+ @see event_new()
+ */
+typedef void (*event_callback_fn)(evutil_socket_t, short, void *);
+
+/**
+ Return a value used to specify that the event itself must be used as the callback argument.
+
+ The function event_new() takes a callback argument which is passed
+ to the event's callback function. To specify that the argument to be
+ passed to the callback function is the event that event_new() returns,
+ pass in the return value of event_self_cbarg() as the callback argument
+ for event_new().
+
+ For example:
+ <pre>
+ struct event *ev = event_new(base, sock, events, callback, %event_self_cbarg());
+ </pre>
+
+ For consistency with event_new(), it is possible to pass the return value
+ of this function as the callback argument for event_assign() &ndash; this
+ achieves the same result as passing the event in directly.
+
+ @return a value to be passed as the callback argument to event_new() or
+ event_assign().
+ @see event_new(), event_assign()
+ */
+EVENT2_EXPORT_SYMBOL
+void *event_self_cbarg(void);
+
+/**
+ Allocate and assign a new event structure, ready to be added.
+
+ The function event_new() returns a new event that can be used in
+ future calls to event_add() and event_del(). The fd and events
+ arguments determine which conditions will trigger the event; the
+ callback and callback_arg arguments tell Libevent what to do when the
+ event becomes active.
+
+ If events contains one of EV_READ, EV_WRITE, or EV_READ|EV_WRITE, then
+ fd is a file descriptor or socket that should get monitored for
+ readiness to read, readiness to write, or readiness for either operation
+ (respectively). If events contains EV_SIGNAL, then fd is a signal
+ number to wait for. If events contains none of those flags, then the
+ event can be triggered only by a timeout or by manual activation with
+ event_active(): In this case, fd must be -1.
+
+ The EV_PERSIST flag can also be passed in the events argument: it makes
+ event_add() persistent until event_del() is called.
+
+ The EV_ET flag is compatible with EV_READ and EV_WRITE, and supported
+ only by certain backends. It tells Libevent to use edge-triggered
+ events.
+
+ The EV_TIMEOUT flag has no effect here.
+
+ It is okay to have multiple events all listening on the same fds; but
+ they must either all be edge-triggered, or all not be edge-triggered.
+
+ When the event becomes active, the event loop will run the provided
+ callback function, with three arguments.  The first will be the provided
+ fd value.  The second will be a bitfield of the events that triggered:
+ EV_READ, EV_WRITE, or EV_SIGNAL.  Here the EV_TIMEOUT flag indicates
+ that a timeout occurred, and EV_ET indicates that an edge-triggered
+ event occurred.  The third argument will be the callback_arg pointer that
+ you provide.
+
+ @param base the event base to which the event should be attached.
+ @param fd the file descriptor or signal to be monitored, or -1.
+ @param events desired events to monitor: bitfield of EV_READ, EV_WRITE,
+ EV_SIGNAL, EV_PERSIST, EV_ET.
+ @param callback callback function to be invoked when the event occurs
+ @param callback_arg an argument to be passed to the callback function
+
+ @return a newly allocated struct event that must later be freed with
+ event_free().
+ @see event_free(), event_add(), event_del(), event_assign()
+ */
+EVENT2_EXPORT_SYMBOL
+struct event *event_new(struct event_base *, evutil_socket_t, short, event_callback_fn, void *);
+
+
+/**
+ Prepare a new, already-allocated event structure to be added.
+
+ The function event_assign() prepares the event structure ev to be used
+ in future calls to event_add() and event_del(). Unlike event_new(), it
+ doesn't allocate memory itself: it requires that you have already
+ allocated a struct event, probably on the heap. Doing this will
+ typically make your code depend on the size of the event structure, and
+ thereby create incompatibility with future versions of Libevent.
+
+ The easiest way to avoid this problem is just to use event_new() and
+ event_free() instead.
+
+ A slightly harder way to future-proof your code is to use
+ event_get_struct_event_size() to determine the required size of an event
+ at runtime.
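+
+ For example (a sketch of that future-proof pattern; error checking is
+ omitted, and on_read is your own callback):
+ <pre>
+   struct event *ev = malloc(event_get_struct_event_size());
+   event_assign(ev, base, fd, EV_READ|EV_PERSIST, on_read, NULL);
+   event_add(ev, NULL);
+ </pre>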
+
+ Note that it is NOT safe to call this function on an event that is
+ active or pending. Doing so WILL corrupt internal data structures in
+ Libevent, and lead to strange, hard-to-diagnose bugs. You _can_ use
+ event_assign to change an existing event, but only if it is not active
+ or pending!
+
+ The arguments for this function, and the behavior of the events that it
+ makes, are as for event_new().
+
+ @param ev an event struct to be modified
+ @param base the event base to which ev should be attached.
+ @param fd the file descriptor to be monitored
+ @param events desired events to monitor; can be EV_READ and/or EV_WRITE
+ @param callback callback function to be invoked when the event occurs
+ @param callback_arg an argument to be passed to the callback function
+
+ @return 0 if success, or -1 on invalid arguments.
+
+ @see event_new(), event_add(), event_del(), event_base_once(),
+ event_get_struct_event_size()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_assign(struct event *, struct event_base *, evutil_socket_t, short, event_callback_fn, void *);
+
+/**
+ Deallocate a struct event * returned by event_new().
+
+ If the event is pending or active, first make it non-pending and
+ non-active.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_free(struct event *);
+
+/**
+ * Callback type for event_finalize and event_free_finalize().
+ *
+ * THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ * BECOMES STABLE.
+ *
+ **/
+typedef void (*event_finalize_callback_fn)(struct event *, void *);
+/**
+ @name Finalization functions
+
+ These functions are used to safely tear down an event in a multithreaded
+ application. If you construct your events with EV_FINALIZE to avoid
+ deadlocks, you will need a way to remove an event in the certainty that
+ it will definitely not be running its callback when you deallocate it
+ and its callback argument.
+
+ To do this, call one of event_finalize() or event_free_finalize with
+ 0 for its first argument, the event to tear down as its second argument,
+ and a callback function as its third argument. The callback will be
+ invoked as part of the event loop, with the event's priority.
+
+ After you call a finalizer function, event_add() and event_active() will
+ no longer work on the event, and event_del() will produce a no-op. You
+ must not try to change the event's fields with event_assign() or
+ event_set() while the finalize callback is in progress. Once the
+ callback has been invoked, you should treat the event structure as
+ containing uninitialized memory.
+
+ The event_free_finalize() function frees the event after it's finalized;
+ event_finalize() does not.
+
+ A finalizer callback must not make events pending or active. It must not
+ add events, activate events, or attempt to "resuscitate" the event being
+ finalized in any way.
+
+ THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ BECOMES STABLE.
+
+ @return 0 on success, -1 on failure.
+ */
+/**@{*/
+EVENT2_EXPORT_SYMBOL
+int event_finalize(unsigned, struct event *, event_finalize_callback_fn);
+EVENT2_EXPORT_SYMBOL
+int event_free_finalize(unsigned, struct event *, event_finalize_callback_fn);
+/**@}*/
+
+/**
+ Schedule a one-time event
+
+ The function event_base_once() is similar to event_new(). However, it
+ schedules a callback to be called exactly once, and does not require the
+ caller to prepare an event structure.
+
+ Note that in Libevent 2.0 and earlier, if the event is never triggered, the
+ internal memory used to hold it will never be freed. In Libevent 2.1,
+ the internal memory will get freed by event_base_free() if the event
+ is never triggered. The 'arg' value, however, will not get freed in either
+ case--you'll need to free that on your own if you want it to go away.
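+
+ For example (a minimal sketch that runs on_timeout once, about five
+ seconds from now; on_timeout is your own callback):
+ <pre>
+   struct timeval five_sec = { 5, 0 };
+   event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &five_sec);
+ </pre>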
+
+ @param base an event_base
+ @param fd a file descriptor to monitor, or -1 for no fd.
+ @param events event(s) to monitor; can be any of EV_READ |
+ EV_WRITE, or EV_TIMEOUT
+ @param callback callback function to be invoked when the event occurs
+ @param arg an argument to be passed to the callback function
+ @param timeout the maximum amount of time to wait for the event.  NULL
+ makes an EV_READ/EV_WRITE event wait forever; NULL makes an
+ EV_TIMEOUT event succeed immediately.
+ @return 0 if successful, or -1 if an error occurred
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_once(struct event_base *, evutil_socket_t, short, event_callback_fn, void *, const struct timeval *);
+
+/**
+ Add an event to the set of pending events.
+
+ The function event_add() schedules the execution of the event 'ev' when the
+ condition specified by event_assign() or event_new() occurs, or when the time
+ specified in timeout has elapsed.  If timeout is NULL, no timeout occurs and
+ the callback will only be invoked if a matching event occurs.  The event in
+ the ev argument must already be initialized by event_assign() or event_new()
+ and may not be used in calls to event_assign() until it is no longer pending.
+
+ If the event in the ev argument already has a scheduled timeout, calling
+ event_add() replaces the old timeout with the new one if tv is non-NULL.
+
+ @param ev an event struct initialized via event_assign() or event_new()
+ @param timeout the maximum amount of time to wait for the event, or NULL
+ to wait forever
+ @return 0 if successful, or -1 if an error occurred
+ @see event_del(), event_assign(), event_new()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_add(struct event *ev, const struct timeval *timeout);
+
+/**
+ Remove a timer from a pending event without removing the event itself.
+
+ If the event has a scheduled timeout, this function unschedules it but
+ leaves the event otherwise pending.
+
+ @param ev an event struct initialized via event_assign() or event_new()
+ @return 0 on success, or -1 if an error occurred.
+*/
+EVENT2_EXPORT_SYMBOL
+int event_remove_timer(struct event *ev);
+
+/**
+ Remove an event from the set of monitored events.
+
+ The function event_del() will cancel the event in the argument ev. If the
+ event has already executed or has never been added the call will have no
+ effect.
+
+ @param ev an event struct to be removed from the working set
+ @return 0 if successful, or -1 if an error occurred
+ @see event_add()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_del(struct event *);
+
+/**
+ As event_del(), but never blocks while the event's callback is running
+ in another thread, even if the event was constructed without the
+ EV_FINALIZE flag.
+
+ THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ BECOMES STABLE.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_del_noblock(struct event *ev);
+/**
+ As event_del(), but always blocks while the event's callback is running
+ in another thread, even if the event was constructed with the
+ EV_FINALIZE flag.
+
+ THIS IS AN EXPERIMENTAL API. IT MIGHT CHANGE BEFORE THE LIBEVENT 2.1 SERIES
+ BECOMES STABLE.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_del_block(struct event *ev);
+
+/**
+ Make an event active.
+
+ You can use this function on a pending or a non-pending event to make it
+ active, so that its callback will be run by event_base_dispatch() or
+ event_base_loop().
+
+ One common use in multithreaded programs is to wake the thread running
+ event_base_loop() from another thread.
+
+ @param ev an event to make active.
+ @param res a set of flags to pass to the event's callback.
+ @param ncalls an obsolete argument: this is ignored.
+ **/
+EVENT2_EXPORT_SYMBOL
+void event_active(struct event *ev, int res, short ncalls);
+
+/**
+ Checks if a specific event is pending or scheduled.
+
+ @param ev an event struct previously passed to event_add()
+ @param events the requested event type; any of EV_TIMEOUT|EV_READ|
+ EV_WRITE|EV_SIGNAL
+ @param tv if this field is not NULL, and the event has a timeout,
+ this field is set to hold the time at which the timeout will
+ expire.
+
+ @return true if the event is pending on any of the events in 'events'
+ (that is to say, it has been added), or 0 if the event is not added.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_pending(const struct event *ev, short events, struct timeval *tv);
+
+/**
+ If called from within the callback for an event, returns that event.
+
+ The behavior of this function is not defined when called from outside the
+ callback function for an event.
+ */
+EVENT2_EXPORT_SYMBOL
+struct event *event_base_get_running_event(struct event_base *base);
+
+/**
+ Test if an event structure might be initialized.
+
+ The event_initialized() function can be used to check if an event has been
+ initialized.
+
+ Warning: This function is only useful for distinguishing a zeroed-out
+ piece of memory from an initialized event; it can easily be confused by
+ uninitialized memory. Thus, it should ONLY be used to distinguish an
+ initialized event from zero.
+
+ @param ev an event structure to be tested
+ @return 1 if the structure might be initialized, or 0 if it has not been
+ initialized
+ */
+EVENT2_EXPORT_SYMBOL
+int event_initialized(const struct event *ev);
+
+/**
+ Get the signal number assigned to a signal event
+*/
+#define event_get_signal(ev) ((int)event_get_fd(ev))
+
+/**
+ Get the socket or signal assigned to an event, or -1 if the event has
+ no socket.
+*/
+EVENT2_EXPORT_SYMBOL
+evutil_socket_t event_get_fd(const struct event *ev);
+
+/**
+ Get the event_base associated with an event.
+*/
+EVENT2_EXPORT_SYMBOL
+struct event_base *event_get_base(const struct event *ev);
+
+/**
+ Return the events (EV_READ, EV_WRITE, etc) assigned to an event.
+*/
+EVENT2_EXPORT_SYMBOL
+short event_get_events(const struct event *ev);
+
+/**
+ Return the callback assigned to an event.
+*/
+EVENT2_EXPORT_SYMBOL
+event_callback_fn event_get_callback(const struct event *ev);
+
+/**
+ Return the callback argument assigned to an event.
+*/
+EVENT2_EXPORT_SYMBOL
+void *event_get_callback_arg(const struct event *ev);
+
+/**
+ Return the priority of an event.
+ @see event_priority_init(), event_get_priority()
+*/
+EVENT2_EXPORT_SYMBOL
+int event_get_priority(const struct event *ev);
+
+/**
+ Extract _all_ of the arguments used to construct a given event.  The
+ event_base is copied into *base_out, the fd is copied into *fd_out, and so
+ on.
+
+ If any of the "_out" arguments is NULL, it will be ignored.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_get_assignment(const struct event *event,
+ struct event_base **base_out, evutil_socket_t *fd_out, short *events_out,
+ event_callback_fn *callback_out, void **arg_out);
+
+/**
+ Return the size of struct event that the Libevent library was compiled
+ with.
+
+ This will be NO GREATER than sizeof(struct event) if you're running with
+ the same version of Libevent that your application was built with, but
+ otherwise might not.
+
+ Note that it might be SMALLER than sizeof(struct event) if some future
+ version of Libevent adds extra padding to the end of struct event.
+ We might do this to help ensure ABI-compatibility between different
+ versions of Libevent.
+ */
+EVENT2_EXPORT_SYMBOL
+size_t event_get_struct_event_size(void);
+
+/**
+ Get the Libevent version.
+
+ Note that this will give you the version of the library that you're
+ currently linked against, not the version of the headers that you've
+ compiled against.
+
+ @return a string containing the version number of Libevent
+*/
+EVENT2_EXPORT_SYMBOL
+const char *event_get_version(void);
+
+/**
+ Return a numeric representation of Libevent's version.
+
+ Note that this will give you the version of the library that you're
+ currently linked against, not the version of the headers you've used to
+ compile.
+
+ The format uses one byte each for the major, minor, and patchlevel parts of
+ the version number. The low-order byte is unused. For example, version
+ 2.0.1-alpha has a numeric representation of 0x02000100
+*/
+EVENT2_EXPORT_SYMBOL
+ev_uint32_t event_get_version_number(void);
+
+/** As event_get_version, but gives the version of Libevent's headers. */
+#define LIBEVENT_VERSION EVENT__VERSION
+/** As event_get_version_number, but gives the version number of Libevent's
+ * headers. */
+#define LIBEVENT_VERSION_NUMBER EVENT__NUMERIC_VERSION
+
+/** Largest number of priorities that Libevent can support. */
+#define EVENT_MAX_PRIORITIES 256
+/**
+ Set the number of different event priorities
+
+ By default Libevent schedules all active events with the same priority.
+ However, it is sometimes desirable to process some events with a higher
+ priority than others.  For that reason, Libevent supports strict priority
+ queues.  Active events with a lower priority number are always processed
+ before events with a higher priority number.
+
+ The number of different priorities can be set initially with the
+ event_base_priority_init() function. This function should be called
+ before the first call to event_base_dispatch(). The
+ event_priority_set() function can be used to assign a priority to an
+ event. By default, Libevent assigns the middle priority to all events
+ unless their priority is explicitly set.
+
+ Note that urgent-priority events can starve less-urgent events: after
+ running all urgent-priority callbacks, Libevent checks for more urgent
+ events again, before running less-urgent events. Less-urgent events
+ will not have their callbacks run until there are no events more urgent
+ than them that want to be active.
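+
+ For example (a minimal sketch with three priority levels; on_tick is your
+ own callback):
+ <pre>
+   struct timeval one_sec = { 1, 0 };
+   event_base_priority_init(base, 3);
+   struct event *urgent = evtimer_new(base, on_tick, NULL);
+   event_priority_set(urgent, 0);   // 0 is the most urgent priority
+   event_add(urgent, &one_sec);
+ </pre>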
+
+ @param eb the event_base structure returned by event_base_new()
+ @param npriorities the maximum number of priorities
+ @return 0 if successful, or -1 if an error occurred
+ @see event_priority_set()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_priority_init(struct event_base *, int);
+
+/**
+ Get the number of different event priorities.
+
+ @param eb the event_base structure returned by event_base_new()
+ @return Number of different event priorities
+ @see event_base_priority_init()
+*/
+EVENT2_EXPORT_SYMBOL
+int event_base_get_npriorities(struct event_base *eb);
+
+/**
+ Assign a priority to an event.
+
+ @param ev an event struct
+ @param priority the new priority to be assigned
+ @return 0 if successful, or -1 if an error occurred
+ @see event_priority_init(), event_get_priority()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_priority_set(struct event *, int);
+
+/**
+ Prepare an event_base to use a large number of timeouts with the same
+ duration.
+
+ Libevent's default scheduling algorithm is optimized for having a large
+ number of timeouts with their durations more or less randomly
+ distributed. But if you have a large number of timeouts that all have
+ the same duration (for example, if you have a large number of
+ connections that all have a 10-second timeout), then you can improve
+ Libevent's performance by telling Libevent about it.
+
+ To do this, call this function with the common duration. It will return a
+ pointer to a different, opaque timeout value. (Don't depend on its actual
+ contents!) When you use this timeout value in event_add(), Libevent will
+ schedule the event more efficiently.
+
+ (This optimization probably will not be worthwhile until you have thousands
+ or tens of thousands of events with the same timeout.)
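+
+ For example (a minimal sketch; ev is an already-initialized timeout event):
+ <pre>
+   struct timeval ten_sec = { 10, 0 };
+   const struct timeval *common =
+       event_base_init_common_timeout(base, &ten_sec);
+   event_add(ev, common);   // pass the returned value, not &ten_sec
+ </pre>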
+ */
+EVENT2_EXPORT_SYMBOL
+const struct timeval *event_base_init_common_timeout(struct event_base *base,
+ const struct timeval *duration);
+
+#if !defined(EVENT__DISABLE_MM_REPLACEMENT) || defined(EVENT_IN_DOXYGEN_)
+/**
+ Override the functions that Libevent uses for memory management.
+
+ Usually, Libevent uses the standard libc functions malloc, realloc, and
+ free to allocate memory. Passing replacements for those functions to
+ event_set_mem_functions() overrides this behavior.
+
+ Note that all memory returned from Libevent will be allocated by the
+ replacement functions rather than by malloc() and realloc(). Thus, if you
+ have replaced those functions, it will not be appropriate to free() memory
+ that you get from Libevent. Instead, you must use the free_fn replacement
+ that you provided.
+
+ Note also that if you are going to call this function, you should do so
+ before any call to any Libevent function that does allocation.
+ Otherwise, those functions will allocate their memory using malloc(), but
+ then later free it using your provided free_fn.
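+
+ For example (a minimal sketch; these wrappers just forward to libc, which
+ is where you would add your own accounting or pooling):
+ <pre>
+   static void *my_malloc(size_t sz) { return malloc(sz); }
+   static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
+   static void my_free(void *p) { free(p); }
+
+   event_set_mem_functions(my_malloc, my_realloc, my_free);
+ </pre>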
+
+ @param malloc_fn A replacement for malloc.
+ @param realloc_fn A replacement for realloc
+ @param free_fn A replacement for free.
+ **/
+EVENT2_EXPORT_SYMBOL
+void event_set_mem_functions(
+ void *(*malloc_fn)(size_t sz),
+ void *(*realloc_fn)(void *ptr, size_t sz),
+ void (*free_fn)(void *ptr));
+/** This definition is present if Libevent was built with support for
+ event_set_mem_functions() */
+#define EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED
+#endif
+
+/**
+ Writes a human-readable description of all inserted and/or active
+ events to a provided stdio stream.
+
+ This is intended for debugging; its format is not guaranteed to be the same
+ between libevent versions.
+
+ @param base An event_base on which to scan the events.
+ @param output A stdio file to write on.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_base_dump_events(struct event_base *, FILE *);
+
+
+/**
+ Activates all pending events for the given fd and event mask.
+
+ This function activates pending events only. Events which have not been
+ added will not become active.
+
+ @param base the event_base on which to activate the events.
+ @param fd An fd to activate events on.
+ @param events One or more of EV_{READ,WRITE}.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events);
+
+/**
+ Activates all pending signals with a given signal number
+
+ This function activates pending events only. Events which have not been
+ added will not become active.
+
+ @param base the event_base on which to activate the events.
+ @param sig The signal to activate events on.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_base_active_by_signal(struct event_base *base, int sig);
+
+/**
+ * Callback for iterating events in an event base via event_base_foreach_event
+ */
+typedef int (*event_base_foreach_event_cb)(const struct event_base *, const struct event *, void *);
+
+/**
+ Iterate over all added or active events in an event loop, and invoke
+ a given callback on each one.
+
+ The callback must not call any function that modifies the event base, that
+ modifies any event in the event base, or that adds or removes any event to
+ the event base. Doing so is unsupported and will lead to undefined
+ behavior -- likely, to crashes.
+
+ event_base_foreach_event() holds a lock on the event_base for the whole
+ time it's running: slow callbacks are not advisable.
+
+ Note that Libevent adds some events of its own to make pieces of its
+ functionality work. You must not assume that the only events you'll
+ encounter will be the ones you added yourself.
+
+ The callback function must return 0 to continue iteration, or some other
+ integer to stop iterating.
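+
+ For example (a minimal sketch that counts the events in a base):
+ <pre>
+   static int count_one(const struct event_base *b, const struct event *e,
+       void *arg)
+   {
+       (*(int *)arg)++;
+       return 0;   // keep iterating
+   }
+
+   int n = 0;
+   event_base_foreach_event(base, count_one, &n);
+ </pre>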
+
+ @param base An event_base on which to scan the events.
+ @param fn A callback function to receive the events.
+ @param arg An argument passed to the callback function.
+ @return 0 if we iterated over every event, or the value returned by the
+ callback function if the loop exited early.
+*/
+EVENT2_EXPORT_SYMBOL
+int event_base_foreach_event(struct event_base *base, event_base_foreach_event_cb fn, void *arg);
+
+
+/** Sets 'tv' to the current time (as returned by gettimeofday()),
+ looking at the cached value in 'base' if possible, and calling
+ gettimeofday() or clock_gettime() as appropriate if there is no
+ cached time.
+
+ Generally, this value will only be cached while actually
+ processing event callbacks, and may be very inaccurate if your
+ callbacks take a long time to execute.
+
+ Returns 0 on success, negative on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_gettimeofday_cached(struct event_base *base,
+ struct timeval *tv);
+
+/** Update cached_tv in the 'base' to the current time
+ *
+ * This function is useful for selectively increasing
+ * the accuracy of the cached time value in 'base' during callbacks
+ * that take a long time to execute.
+ *
+ * This function has no effect if the base is currently not in its
+ * event loop, or if timeval caching is disabled via
+ * EVENT_BASE_FLAG_NO_CACHE_TIME.
+ *
+ * @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int event_base_update_cache_time(struct event_base *base);
+
+/** Release all globally-allocated resources held by Libevent.
+
+ This function does not free developer-controlled resources like
+ event_bases, events, bufferevents, listeners, and so on. It only releases
+ resources like global locks that there is no other way to free.
+
+ It is not actually necessary to call this function before exit: every
+ resource that it frees would be released anyway on exit. It mainly exists
+ so that resource-leak debugging tools don't see Libevent as holding
+ resources at exit.
+
+ You should only call this function when no other Libevent functions will
+ be invoked -- e.g., when cleanly exiting a program.
+ */
+EVENT2_EXPORT_SYMBOL
+void libevent_global_shutdown(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_EVENT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/event_compat.h b/libs/libevent/include/event2/event_compat.h
new file mode 100644
index 0000000000..5110175a1b
--- /dev/null
+++ b/libs/libevent/include/event2/event_compat.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_EVENT_COMPAT_H_INCLUDED_
+#define EVENT2_EVENT_COMPAT_H_INCLUDED_
+
+/** @file event2/event_compat.h
+
+ Potentially non-threadsafe versions of the functions in event.h: provided
+ only for backwards compatibility.
+
+ In the oldest versions of Libevent, event_base was not a first-class
+ structure. Instead, there was a single event base that every function
+ manipulated. Later, when separate event bases were added, the old functions
+ that didn't take an event_base argument needed to work by manipulating the
+ "current" event base. This could lead to thread-safety issues, and obscure,
+ hard-to-diagnose bugs.
+
+ @deprecated All functions in this file are by definition deprecated.
+ */
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/**
+ Initialize the event API.
+
+ The event API needs to be initialized with event_init() before it can be
+ used. Sets the global current base that gets used for events that have no
+ base associated with them.
+
+ @deprecated This function is deprecated because it replaces the "current"
+ event_base, and is totally unsafe for multithreaded use. The replacement
+ is event_base_new().
+
+ @see event_base_set(), event_base_new()
+ */
+EVENT2_EXPORT_SYMBOL
+struct event_base *event_init(void);
+
+/**
+ Loop to process events.
+
+ Like event_base_dispatch(), but uses the "current" base.
+
+ @deprecated This function is deprecated because it is easily confused by
+ multiple calls to event_init(), and because it is not safe for
+ multithreaded use. The replacement is event_base_dispatch().
+
+ @see event_base_dispatch(), event_init()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_dispatch(void);
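+
+/*
+ * Migration sketch (error handling omitted): instead of the deprecated
+ *
+ *     event_init();
+ *     event_dispatch();
+ *
+ * new code should create an explicit base:
+ *
+ *     struct event_base *base = event_base_new();
+ *     event_base_dispatch(base);
+ *     event_base_free(base);
+ */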
+
+/**
+ Handle events.
+
+ This function behaves like event_base_loop(), but uses the "current" base
+
+ @deprecated This function is deprecated because it uses the event base from
+ the last call to event_init, and is therefore not safe for multithreaded
+ use. The replacement is event_base_loop().
+
+ @see event_base_loop(), event_init()
+*/
+EVENT2_EXPORT_SYMBOL
+int event_loop(int);
+
+
+/**
+ Exit the event loop after the specified time.
+
+ This function behaves like event_base_loopexit(), except that it uses the
+ "current" base.
+
+ @deprecated This function is deprecated because it uses the event base from
+ the last call to event_init, and is therefore not safe for multithreaded
+ use. The replacement is event_base_loopexit().
+
+ @see event_init, event_base_loopexit()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_loopexit(const struct timeval *);
+
+
+/**
+ Abort the active event_loop() immediately.
+
+ This function behaves like event_base_loopbreak(), except that it uses the
+ "current" base.
+
+ @deprecated This function is deprecated because it uses the event base from
+ the last call to event_init, and is therefore not safe for multithreaded
+ use. The replacement is event_base_loopbreak().
+
+ @see event_base_loopbreak(), event_init()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_loopbreak(void);
+
+/**
+ Schedule a one-time event to occur.
+
+ @deprecated This function is obsolete, and has been replaced by
+ event_base_once(). Its use is deprecated because it relies on the
+ "current" base configured by event_init().
+
+ @see event_base_once()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_once(evutil_socket_t , short,
+ void (*)(evutil_socket_t, short, void *), void *, const struct timeval *);
+
+
+/**
+ Get the kernel event notification mechanism used by Libevent.
+
+ @deprecated This function is obsolete, and has been replaced by
+ event_base_get_method(). Its use is deprecated because it relies on the
+ "current" base configured by event_init().
+
+ @see event_base_get_method()
+ */
+EVENT2_EXPORT_SYMBOL
+const char *event_get_method(void);
+
+
+/**
+ Set the number of different event priorities.
+
+ @deprecated This function is deprecated because it is easily confused by
+ multiple calls to event_init(), and because it is not safe for
+ multithreaded use. The replacement is event_base_priority_init().
+
+ @see event_base_priority_init()
+ */
+EVENT2_EXPORT_SYMBOL
+int event_priority_init(int);
+
+/**
+ Prepare an event structure to be added.
+
+ @deprecated event_set() is not recommended for new code, because it requires
+ a subsequent call to event_base_set() to be safe under most circumstances.
+ Use event_assign() or event_new() instead.
+ */
+EVENT2_EXPORT_SYMBOL
+void event_set(struct event *, evutil_socket_t, short, void (*)(evutil_socket_t, short, void *), void *);
+
+#define evtimer_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg))
+#define evsignal_set(ev, x, cb, arg) \
+ event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
+
+
+/**
+ @name timeout_* macros
+
+ @deprecated These macros are deprecated because their naming is inconsistent
+ with the rest of Libevent. Use the evtimer_* macros instead.
+ @{
+ */
+#define timeout_add(ev, tv) event_add((ev), (tv))
+#define timeout_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg))
+#define timeout_del(ev) event_del(ev)
+#define timeout_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv))
+#define timeout_initialized(ev) event_initialized(ev)
+/**@}*/
+
+/**
+ @name signal_* macros
+
+ @deprecated These macros are deprecated because their naming is inconsistent
+ with the rest of Libevent. Use the evsignal_* macros instead.
+ @{
+ */
+#define signal_add(ev, tv) event_add((ev), (tv))
+#define signal_set(ev, x, cb, arg) \
+ event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
+#define signal_del(ev) event_del(ev)
+#define signal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv))
+#define signal_initialized(ev) event_initialized(ev)
+/**@}*/
+
+#ifndef EVENT_FD
+/* These macros are obsolete; use event_get_fd and event_get_signal instead. */
+#define EVENT_FD(ev) ((int)event_get_fd(ev))
+#define EVENT_SIGNAL(ev) event_get_signal(ev)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_EVENT_COMPAT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/event_struct.h b/libs/libevent/include/event2/event_struct.h
new file mode 100644
index 0000000000..1c8b71b6b0
--- /dev/null
+++ b/libs/libevent/include/event2/event_struct.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_EVENT_STRUCT_H_INCLUDED_
+#define EVENT2_EVENT_STRUCT_H_INCLUDED_
+
+/** @file event2/event_struct.h
+
+ Structures used by event.h. Using these structures directly WILL harm
+ forward compatibility: be careful.
+
+ No field declared in this file should be used directly in user code. If not
+ for historical reasons, these fields would not be exposed at all.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/* For evkeyvalq */
+#include <event2/keyvalq_struct.h>
+
+#define EVLIST_TIMEOUT 0x01
+#define EVLIST_INSERTED 0x02
+#define EVLIST_SIGNAL 0x04
+#define EVLIST_ACTIVE 0x08
+#define EVLIST_INTERNAL 0x10
+#define EVLIST_ACTIVE_LATER 0x20
+#define EVLIST_FINALIZING 0x40
+#define EVLIST_INIT 0x80
+
+#define EVLIST_ALL 0xff
+
+/* Fix so that people don't have to run with <sys/queue.h> */
+#ifndef TAILQ_ENTRY
+#define EVENT_DEFINED_TQENTRY_
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+#endif /* !TAILQ_ENTRY */
+
+#ifndef TAILQ_HEAD
+#define EVENT_DEFINED_TQHEAD_
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; \
+ struct type **tqh_last; \
+}
+#endif
+
+/* Fix so that people don't have to run with <sys/queue.h> */
+#ifndef LIST_ENTRY
+#define EVENT_DEFINED_LISTENTRY_
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+#endif /* !LIST_ENTRY */
+
+#ifndef LIST_HEAD
+#define EVENT_DEFINED_LISTHEAD_
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+ }
+#endif /* !LIST_HEAD */
+
+struct event;
+
+struct event_callback {
+ TAILQ_ENTRY(event_callback) evcb_active_next;
+ short evcb_flags;
+ ev_uint8_t evcb_pri; /* smaller numbers are higher priority */
+ ev_uint8_t evcb_closure;
+	/* allows us to adapt to different types of events */
+ union {
+ void (*evcb_callback)(evutil_socket_t, short, void *);
+ void (*evcb_selfcb)(struct event_callback *, void *);
+ void (*evcb_evfinalize)(struct event *, void *);
+ void (*evcb_cbfinalize)(struct event_callback *, void *);
+ } evcb_cb_union;
+ void *evcb_arg;
+};
+
+struct event_base;
+struct event {
+ struct event_callback ev_evcallback;
+
+ /* for managing timeouts */
+ union {
+ TAILQ_ENTRY(event) ev_next_with_common_timeout;
+ int min_heap_idx;
+ } ev_timeout_pos;
+ evutil_socket_t ev_fd;
+
+ struct event_base *ev_base;
+
+ union {
+ /* used for io events */
+ struct {
+ LIST_ENTRY (event) ev_io_next;
+ struct timeval ev_timeout;
+ } ev_io;
+
+ /* used by signal events */
+ struct {
+ LIST_ENTRY (event) ev_signal_next;
+ short ev_ncalls;
+ /* Allows deletes in callback */
+ short *ev_pncalls;
+ } ev_signal;
+ } ev_;
+
+ short ev_events;
+ short ev_res; /* result passed to event callback */
+ struct timeval ev_timeout;
+};
+
+TAILQ_HEAD (event_list, event);
+
+#ifdef EVENT_DEFINED_TQENTRY_
+#undef TAILQ_ENTRY
+#endif
+
+#ifdef EVENT_DEFINED_TQHEAD_
+#undef TAILQ_HEAD
+#endif
+
+LIST_HEAD (event_dlist, event);
+
+#ifdef EVENT_DEFINED_LISTENTRY_
+#undef LIST_ENTRY
+#endif
+
+#ifdef EVENT_DEFINED_LISTHEAD_
+#undef LIST_HEAD
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_EVENT_STRUCT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/http.h b/libs/libevent/include/event2/http.h
new file mode 100644
index 0000000000..e99782073f
--- /dev/null
+++ b/libs/libevent/include/event2/http.h
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_HTTP_H_INCLUDED_
+#define EVENT2_HTTP_H_INCLUDED_
+
+/* For int types. */
+#include <event2/util.h>
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In case we haven't included the right headers yet. */
+struct evbuffer;
+struct event_base;
+struct bufferevent;
+struct evhttp_connection;
+
+/** @file event2/http.h
+ *
+ * Basic support for HTTP serving.
+ *
+ * As Libevent is a library for dealing with event notification and most
+ * interesting applications are networked today, I have often found the
+ * need to write HTTP code. The following prototypes and definitions provide
+ * an application with a minimal interface for making HTTP requests and for
+ * creating a very simple HTTP server.
+ */
+
+/* Response codes */
+#define HTTP_OK 200 /**< request completed ok */
+#define HTTP_NOCONTENT 204 /**< request does not have content */
+#define HTTP_MOVEPERM 301 /**< the uri moved permanently */
+#define HTTP_MOVETEMP 302 /**< the uri moved temporarily */
+#define HTTP_NOTMODIFIED	304	/**< page was not modified since the last request */
+#define HTTP_BADREQUEST 400 /**< invalid http request was made */
+#define HTTP_NOTFOUND 404 /**< could not find content for uri */
+#define HTTP_BADMETHOD 405 /**< method not allowed for this uri */
+#define HTTP_ENTITYTOOLARGE	413	/**< request entity was too large */
+#define HTTP_EXPECTATIONFAILED 417 /**< we can't handle this expectation */
+#define HTTP_INTERNAL 500 /**< internal error */
+#define HTTP_NOTIMPLEMENTED 501 /**< not implemented */
+#define HTTP_SERVUNAVAIL 503 /**< the server is not available */
+
+struct evhttp;
+struct evhttp_request;
+struct evkeyvalq;
+struct evhttp_bound_socket;
+struct evconnlistener;
+struct evdns_base;
+
+/**
+ * Create a new HTTP server.
+ *
+ * @param base (optional) the event base to receive the HTTP events
+ * @return a pointer to a newly initialized evhttp server structure
+ * @see evhttp_free()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp *evhttp_new(struct event_base *base);
+
+/**
+ * Binds an HTTP server on the specified address and port.
+ *
+ * Can be called multiple times to bind the same http server
+ * to multiple different ports.
+ *
+ * @param http a pointer to an evhttp object
+ * @param address a string containing the IP address to listen(2) on
+ * @param port the port number to listen on
+ * @return 0 on success, -1 on failure.
+ * @see evhttp_accept_socket()
+ */
+EVENT2_EXPORT_SYMBOL
+int evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port);
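+
+/*
+ * Minimal server-setup sketch (illustrative only; the address and port are
+ * arbitrary, and error handling is omitted):
+ *
+ *     struct event_base *base = event_base_new();
+ *     struct evhttp *http = evhttp_new(base);
+ *     evhttp_bind_socket(http, "0.0.0.0", 8080);
+ *     // ... register handlers with evhttp_set_cb()/evhttp_set_gencb() ...
+ *     event_base_dispatch(base);
+ */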
+
+/**
+ * Like evhttp_bind_socket(), but returns a handle for referencing the socket.
+ *
+ * The returned pointer is not valid after \a http is freed.
+ *
+ * @param http a pointer to an evhttp object
+ * @param address a string containing the IP address to listen(2) on
+ * @param port the port number to listen on
+ * @return Handle for the socket on success, NULL on failure.
+ * @see evhttp_bind_socket(), evhttp_del_accept_socket()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_bound_socket *evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port);
+
+/**
+ * Makes an HTTP server accept connections on the specified socket.
+ *
+ * This may be useful to create a socket and then fork multiple instances
+ * of an http server, or when a socket has been communicated via file
+ * descriptor passing in situations where an http server does not have
+ * permissions to bind to a low-numbered port.
+ *
+ * Can be called multiple times to have the http server listen to
+ * multiple different sockets.
+ *
+ * @param http a pointer to an evhttp object
+ * @param fd a socket fd that is ready for accepting connections
+ * @return 0 on success, -1 on failure.
+ * @see evhttp_bind_socket()
+ */
+EVENT2_EXPORT_SYMBOL
+int evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd);
+
+/**
+ * Like evhttp_accept_socket(), but returns a handle for referencing the socket.
+ *
+ * The returned pointer is not valid after \a http is freed.
+ *
+ * @param http a pointer to an evhttp object
+ * @param fd a socket fd that is ready for accepting connections
+ * @return Handle for the socket on success, NULL on failure.
+ * @see evhttp_accept_socket(), evhttp_del_accept_socket()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_bound_socket *evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd);
+
+/**
+ * The most low-level evhttp_bind/accept method: takes an evconnlistener, and
+ * returns an evhttp_bound_socket. The listener will be freed when the bound
+ * socket is freed.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_bound_socket *evhttp_bind_listener(struct evhttp *http, struct evconnlistener *listener);
+
+/**
+ * Return the listener used to implement a bound socket.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evconnlistener *evhttp_bound_socket_get_listener(struct evhttp_bound_socket *bound);
+
+typedef void evhttp_bound_socket_foreach_fn(struct evhttp_bound_socket *, void *);
+/**
+ * Applies the function specified in the first argument to all
+ * evhttp_bound_sockets associated with "http". The user must not
+ * attempt to free or remove any connections, sockets or listeners
+ * in the callback "function".
+ *
+ * @param http pointer to an evhttp object
+ * @param function function to apply to every bound socket
+ * @param argument pointer value passed to function for every socket iterated
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_foreach_bound_socket(struct evhttp *http, evhttp_bound_socket_foreach_fn *function, void *argument);
+
+/**
+ * Makes an HTTP server stop accepting connections on the specified socket
+ *
+ * This may be useful when a socket has been sent via file descriptor passing
+ * and is no longer needed by the current process.
+ *
+ * If you created this bound socket with evhttp_bind_socket_with_handle or
+ * evhttp_accept_socket_with_handle, this function closes the fd you provided.
+ * If you created this bound socket with evhttp_bind_listener, this function
+ * frees the listener you provided.
+ *
+ * \a bound_socket is an invalid pointer after this call returns.
+ *
+ * @param http a pointer to an evhttp object
+ * @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle
+ * @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle()
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound_socket);
+
+/**
+ * Get the raw file descriptor referenced by an evhttp_bound_socket.
+ *
+ * @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle
+ * @return the file descriptor used by the bound socket
+ * @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle()
+ */
+EVENT2_EXPORT_SYMBOL
+evutil_socket_t evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound_socket);
+
+/**
+ * Free the previously created HTTP server.
+ *
+ * Works only if no requests are currently being served.
+ *
+ * @param http the evhttp server object to be freed
+ * @see evhttp_start()
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_free(struct evhttp* http);
+
+/** XXX Document. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size);
+/** XXX Document. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size);
+
+/**
+ Set the value to use for the Content-Type header when none was provided. If
+ the content type string is NULL, the Content-Type header will not be
+ automatically added.
+
+ @param http the http server on which to set the default content type
+ @param content_type the value for the Content-Type header
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_default_content_type(struct evhttp *http,
+ const char *content_type);
+
+/**
+ Sets which HTTP methods are supported in requests accepted by this
+ server, and passed to user callbacks.
+
+ Requests using unsupported methods will generate a "405 Method not allowed" response.
+
+ By default this includes the following methods: GET, POST, HEAD, PUT, DELETE
+
+ @param http the http server on which to set the methods
+ @param methods bit mask constructed from evhttp_cmd_type values
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_allowed_methods(struct evhttp* http, ev_uint16_t methods);
+
+/**
+ Set a callback for a specified URI
+
+ @param http the http server on which to set the callback
+ @param path the path for which to invoke the callback
+ @param cb the callback function that gets invoked when the path is requested
+ @param cb_arg an additional context argument for the callback
+ @return 0 on success, -1 if the callback existed already, -2 on failure
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_set_cb(struct evhttp *http, const char *path,
+ void (*cb)(struct evhttp_request *, void *), void *cb_arg);
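+
+/*
+ * Illustrative callback for a fixed path ("/hello" and hello_cb are
+ * arbitrary examples; <event2/buffer.h> is assumed for the evbuffer calls):
+ *
+ *     static void hello_cb(struct evhttp_request *req, void *arg)
+ *     {
+ *         struct evbuffer *body = evbuffer_new();
+ *         evbuffer_add_printf(body, "Hello, world\n");
+ *         evhttp_send_reply(req, HTTP_OK, "OK", body);
+ *         evbuffer_free(body);
+ *     }
+ *
+ *     evhttp_set_cb(http, "/hello", hello_cb, NULL);
+ */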
+
+/** Removes the callback for a specified URI */
+EVENT2_EXPORT_SYMBOL
+int evhttp_del_cb(struct evhttp *, const char *);
+
+/**
+ Set a callback for all requests that are not caught by specific callbacks
+
+ Invokes the specified callback for all requests that do not match any of
+ the previously specified request paths. This is a catchall for requests not
+ specifically configured with evhttp_set_cb().
+
+ @param http the evhttp server object for which to set the callback
+ @param cb the callback to invoke for any unmatched requests
+ @param arg a context argument for the callback
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_gencb(struct evhttp *http,
+ void (*cb)(struct evhttp_request *, void *), void *arg);
+
+/**
+ Set a callback used to create new bufferevents for connections
+ to a given evhttp object.
+
+ You can use this to override the default bufferevent type -- for example,
+ to make this evhttp object use SSL bufferevents rather than unencrypted
+ ones.
+
+ New bufferevents must be allocated with no fd set on them.
+
+ @param http the evhttp server object for which to set the callback
+ @param cb the callback to invoke for incoming connections
+ @param arg a context argument for the callback
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_bevcb(struct evhttp *http,
+ struct bufferevent *(*cb)(struct event_base *, void *), void *arg);
+
+/**
+ Adds a virtual host to the http server.
+
+ A virtual host is a newly initialized evhttp object that has request
+ callbacks set on it via evhttp_set_cb() or evhttp_set_gencb(). It
+ must not have any listening sockets associated with it.
+
+ If the virtual host has not been removed by the time that evhttp_free()
+ is called on the main http server, it will be automatically freed, too.
+
+ It is possible to have hierarchical vhosts. For example: A vhost
+ with the pattern *.example.com may have other vhosts with patterns
+ foo.example.com and bar.example.com associated with it.
+
+ @param http the evhttp object to which to add a virtual host
+ @param pattern the glob pattern against which the hostname is matched.
+ The match is case insensitive and follows otherwise regular shell
+ matching.
+ @param vhost the virtual host to add to the regular http server.
+ @return 0 on success, -1 on failure
+ @see evhttp_remove_virtual_host()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_add_virtual_host(struct evhttp* http, const char *pattern,
+ struct evhttp* vhost);
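+
+/*
+ * Virtual-host sketch ("foo.example.com" and foo_handler are illustrative):
+ *
+ *     struct evhttp *vhost = evhttp_new(base);
+ *     evhttp_set_gencb(vhost, foo_handler, NULL);
+ *     evhttp_add_virtual_host(http, "foo.example.com", vhost);
+ */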
+
+/**
+ Removes a virtual host from the http server.
+
+ @param http the evhttp object from which to remove the virtual host
+ @param vhost the virtual host to remove from the regular http server.
+ @return 0 on success, -1 on failure
+ @see evhttp_add_virtual_host()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost);
+
+/**
+ Add a server alias to an http object. The http object can be a virtual
+ host or the main server.
+
+ @param http the evhttp object
+ @param alias the alias to add
+ @see evhttp_remove_server_alias()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_add_server_alias(struct evhttp *http, const char *alias);
+
+/**
+ Remove a server alias from an http object.
+
+ @param http the evhttp object
+ @param alias the alias to remove
+ @see evhttp_add_server_alias()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_remove_server_alias(struct evhttp *http, const char *alias);
+
+/**
+ * Set the timeout for an HTTP request.
+ *
+ * @param http an evhttp object
+ * @param timeout_in_secs the timeout, in seconds
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_timeout(struct evhttp *http, int timeout_in_secs);
+
+/**
+ * Set the timeout for an HTTP request.
+ *
+ * @param http an evhttp object
+ * @param tv the timeout, or NULL
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_set_timeout_tv(struct evhttp *http, const struct timeval* tv);
+
+/* Request/Response functionality */
+
+/**
+ * Send an HTML error message to the client.
+ *
+ * @param req a request object
+ * @param error the HTTP error code
+ * @param reason a brief explanation of the error. If this is NULL, we'll
+ * just use the standard meaning of the error code.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_error(struct evhttp_request *req, int error,
+ const char *reason);
+
+/**
+ * Send an HTML reply to the client.
+ *
+ * The body of the reply consists of the data in databuf. After calling
+ * evhttp_send_reply() databuf will be empty, but the buffer is still
+ * owned by the caller and needs to be deallocated by the caller if
+ * necessary.
+ *
+ * @param req a request object
+ * @param code the HTTP response code to send
+ * @param reason a brief message to send with the response code
+ * @param databuf the body of the response
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_reply(struct evhttp_request *req, int code,
+ const char *reason, struct evbuffer *databuf);
+
+/* Low-level response interface, for streaming/chunked replies */
+
+/**
+ Initiate a reply that uses Transfer-Encoding chunked.
+
+ This allows the caller to stream the reply back to the client and is
+ useful either when not all of the reply data is immediately available
+ or when sending very large replies.
+
+ The caller needs to supply data chunks with evhttp_send_reply_chunk()
+ and complete the reply by calling evhttp_send_reply_end().
+
+ @param req a request object
+ @param code the HTTP response code to send
+ @param reason a brief message to send with the response code
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_reply_start(struct evhttp_request *req, int code,
+ const char *reason);
+
+/**
+ Send another data chunk as part of an ongoing chunked reply.
+
+ The reply chunk consists of the data in databuf. After calling
+ evhttp_send_reply_chunk() databuf will be empty, but the buffer is
+ still owned by the caller and needs to be deallocated by the caller
+ if necessary.
+
+ @param req a request object
+ @param databuf the data chunk to send as part of the reply.
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_reply_chunk(struct evhttp_request *req,
+ struct evbuffer *databuf);
+
+/**
+ Send another data chunk as part of an ongoing chunked reply.
+
+ The reply chunk consists of the data in databuf. After calling
+ evhttp_send_reply_chunk() databuf will be empty, but the buffer is
+ still owned by the caller and needs to be deallocated by the caller
+ if necessary.
+
+ @param req a request object
+ @param databuf the data chunk to send as part of the reply.
+ @param cb the callback function
+ @param arg the callback's argument.
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_reply_chunk_with_cb(struct evhttp_request *, struct evbuffer *,
+ void (*cb)(struct evhttp_connection *, void *), void *arg);
+
+/**
+ Complete a chunked reply, freeing the request as appropriate.
+
+ @param req a request object
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_send_reply_end(struct evhttp_request *req);
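+
+/*
+ * Chunked-reply sketch, as used from inside a request callback (error
+ * handling omitted; <event2/buffer.h> assumed):
+ *
+ *     evhttp_send_reply_start(req, HTTP_OK, "OK");
+ *     struct evbuffer *chunk = evbuffer_new();
+ *     evbuffer_add_printf(chunk, "part of the body\n");
+ *     evhttp_send_reply_chunk(req, chunk);   // may be called repeatedly
+ *     evbuffer_free(chunk);
+ *     evhttp_send_reply_end(req);
+ */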
+
+/*
+ * Interfaces for making requests
+ */
+
+/** The different request types supported by evhttp. These are as specified
+ * in RFC2616, except for PATCH which is specified by RFC5789.
+ *
+ * By default, only some of these methods are accepted and passed to user
+ * callbacks; use evhttp_set_allowed_methods() to change which methods
+ * are allowed.
+ */
+enum evhttp_cmd_type {
+ EVHTTP_REQ_GET = 1 << 0,
+ EVHTTP_REQ_POST = 1 << 1,
+ EVHTTP_REQ_HEAD = 1 << 2,
+ EVHTTP_REQ_PUT = 1 << 3,
+ EVHTTP_REQ_DELETE = 1 << 4,
+ EVHTTP_REQ_OPTIONS = 1 << 5,
+ EVHTTP_REQ_TRACE = 1 << 6,
+ EVHTTP_REQ_CONNECT = 1 << 7,
+ EVHTTP_REQ_PATCH = 1 << 8
+};
+
+/** a request object can represent either a request or a reply */
+enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
+
+/**
+ * Create and return a connection object that can be used for making HTTP
+ * requests. The connection object tries to resolve address and establish the
+ * connection when it is given an http request object.
+ *
+ * @param base the event_base to use for handling the connection
+ * @param dnsbase the dns_base to use for resolving host names; if not
+ * specified, host name resolution will block.
+ * @param bev a bufferevent to use for connecting to the server; if NULL, a
+ * socket-based bufferevent will be created. This bufferevent will be freed
+ * when the connection closes. It must have no fd set on it.
+ * @param address the address to which to connect
+ * @param port the port to connect to
+ * @return an evhttp_connection object that can be used for making requests
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_connection *evhttp_connection_base_bufferevent_new(
+ struct event_base *base, struct evdns_base *dnsbase, struct bufferevent* bev, const char *address, unsigned short port);
+
+/**
+ * Return the bufferevent that an evhttp_connection is using.
+ */
+EVENT2_EXPORT_SYMBOL
+struct bufferevent* evhttp_connection_get_bufferevent(struct evhttp_connection *evcon);
+
+/**
+ * Return the HTTP server associated with this connection, or NULL.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp *evhttp_connection_get_server(struct evhttp_connection *evcon);
+
+/**
+ * Creates a new request object that needs to be filled in with the request
+ * parameters. The callback is executed when the request has completed or
+ * an error has occurred.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_request *evhttp_request_new(
+ void (*cb)(struct evhttp_request *, void *), void *arg);
+
+/**
+ * Enable delivery of chunks to requestor.
+ * @param cb will be called after every read of data with the same argument
+ * as the completion callback. Will never be called on an empty
+ * response. May drain the input buffer; it will be drained
+ * automatically on return.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_set_chunked_cb(struct evhttp_request *,
+ void (*cb)(struct evhttp_request *, void *));
+
+/**
+ * Register callback for additional parsing of request headers.
+ * @param cb will be called after receiving and parsing the full header.
+ * It allows analyzing the header and possibly closing the connection
+ * by returning a value < 0.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_set_header_cb(struct evhttp_request *,
+ int (*cb)(struct evhttp_request *, void *));
+
+/**
+ * The different error types supported by evhttp
+ *
+ * @see evhttp_request_set_error_cb()
+ */
+enum evhttp_request_error {
+ /**
+ * Timeout reached, also @see evhttp_connection_set_timeout()
+ */
+ EVREQ_HTTP_TIMEOUT,
+ /**
+ * EOF reached
+ */
+ EVREQ_HTTP_EOF,
+ /**
+ * Error while reading header, or invalid header
+ */
+ EVREQ_HTTP_INVALID_HEADER,
+ /**
+ * Error encountered while reading or writing
+ */
+ EVREQ_HTTP_BUFFER_ERROR,
+ /**
+	 * evhttp_cancel_request() was called on this request.
+ */
+ EVREQ_HTTP_REQUEST_CANCEL,
+ /**
+	 * Body is larger than the limit set with evhttp_connection_set_max_body_size()
+ */
+ EVREQ_HTTP_DATA_TOO_LONG
+};
+/**
+ * Set a callback for errors
+ * @see evhttp_request_error for error types.
+ *
+ * On error, both the error callback and the regular callback will be called;
+ * the error callback is called before the regular callback.
+ **/
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_set_error_cb(struct evhttp_request *,
+ void (*)(enum evhttp_request_error, void *));
+
+/**
+ * Set a callback to be called on completion of a request served with an evhttp_send_* function.
+ *
+ * The callback function will be called on the completion of the request after
+ * the output data has been written and before the evhttp_request object
+ * is destroyed. This can be useful for tracking resources associated with a
+ * request (ex: timing metrics).
+ *
+ * @param req a request object
+ * @param cb callback function that will be called on request completion
+ * @param cb_arg an additional context argument for the callback
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_set_on_complete_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *), void *cb_arg);
+
+/** Frees the request object and removes associated events. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_free(struct evhttp_request *req);
+
+/**
+ * Create and return a connection object that can be used to for making HTTP
+ * requests. The connection object tries to resolve address and establish the
+ * connection when it is given an http request object.
+ *
+ * @param base the event_base to use for handling the connection
+ * @param dnsbase the dns_base to use for resolving host names; if not
+ * specified, host name resolution will block.
+ * @param address the address to which to connect
+ * @param port the port to connect to
+ * @return an evhttp_connection object that can be used for making requests
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_connection *evhttp_connection_base_new(
+ struct event_base *base, struct evdns_base *dnsbase,
+ const char *address, unsigned short port);
+
+/**
+ * Set family hint for DNS requests.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_family(struct evhttp_connection *evcon,
+ int family);
+
+/* reuse connection address on retry */
+#define EVHTTP_CON_REUSE_CONNECTED_ADDR 0x0008
+/* Padding for public flags, @see EVHTTP_CON_* in http-internal.h */
+#define EVHTTP_CON_PUBLIC_FLAGS_END 0x100000
+/**
+ * Set connection flags.
+ *
+ * @see EVHTTP_CON_*
+ * @return 0 on success, otherwise non-zero (for example, if the flag is
+ * not supported).
+ */
+EVENT2_EXPORT_SYMBOL
+int evhttp_connection_set_flags(struct evhttp_connection *evcon,
+ int flags);
+
+/** Takes ownership of the request object
+ *
+ * Can be used in a request callback to hold onto the request until
+ * evhttp_request_free() is explicitly called by the user.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_request_own(struct evhttp_request *req);
+
+/** Returns 1 if the request is owned by the user */
+EVENT2_EXPORT_SYMBOL
+int evhttp_request_is_owned(struct evhttp_request *req);
+
+/**
+ * Returns the connection object associated with the request or NULL
+ *
+ * The user needs to either free the request explicitly or call
+ * evhttp_send_reply_end().
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_connection *evhttp_request_get_connection(struct evhttp_request *req);
+
+/**
+ * Returns the underlying event_base for this connection
+ */
+EVENT2_EXPORT_SYMBOL
+struct event_base *evhttp_connection_get_base(struct evhttp_connection *evcon);
+
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon,
+ ev_ssize_t new_max_headers_size);
+
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_max_body_size(struct evhttp_connection* evcon,
+ ev_ssize_t new_max_body_size);
+
+/** Frees an http connection */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_free(struct evhttp_connection *evcon);
+
+/** Disowns a given connection object
+ *
+ * Can be used to tell libevent to free the connection object after
+ * the last request has completed or failed.
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_free_on_completion(struct evhttp_connection *evcon);
+
+/** sets the ip address from which http connections are made */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+ const char *address);
+
+/** sets the local port from which http connections are made */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+ ev_uint16_t port);
+
+/** Sets the timeout in seconds for events related to this connection */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+ int timeout_in_secs);
+
+/** Sets the timeout for events related to this connection. Takes a struct
+ * timeval. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_timeout_tv(struct evhttp_connection *evcon,
+ const struct timeval *tv);
+
+/** Sets the delay before retrying requests on this connection. This is only
+ * used if evhttp_connection_set_retries is used to make the number of retries
+ * at least one. Each retry after the first is twice as long as the one before
+ * it. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_initial_retry_tv(struct evhttp_connection *evcon,
+ const struct timeval *tv);
+
+/** Sets the retry limit for this connection - -1 repeats indefinitely */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_retries(struct evhttp_connection *evcon,
+ int retry_max);
+
+/** Set a callback for connection close. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+ void (*)(struct evhttp_connection *, void *), void *);
+
+/** Get the remote address and port associated with this connection. */
+EVENT2_EXPORT_SYMBOL
+void evhttp_connection_get_peer(struct evhttp_connection *evcon,
+ char **address, ev_uint16_t *port);
+
+/** Get the remote address associated with this connection.
+ * The address is extracted from getpeername(), or from the nameserver.
+ *
+ * @return NULL if getpeername() does not succeed or the connection is not
+ * connected; otherwise a pointer to a struct sockaddr_storage */
+EVENT2_EXPORT_SYMBOL
+const struct sockaddr*
+evhttp_connection_get_addr(struct evhttp_connection *evcon);
+
+/**
+ Make an HTTP request over the specified connection.
+
+ The connection gets ownership of the request. On failure, the
+ request object is no longer valid as it has been freed.
+
+ @param evcon the evhttp_connection object over which to send the request
+ @param req the previously created and configured request object
+ @param type the request type EVHTTP_REQ_GET, EVHTTP_REQ_POST, etc.
+ @param uri the URI associated with the request
+ @return 0 on success, -1 on failure
+ @see evhttp_cancel_request()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_make_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req,
+ enum evhttp_cmd_type type, const char *uri);
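+
+/*
+ * Client-request sketch ("www.example.com" and request_done_cb are
+ * illustrative; passing NULL as the DNS base makes name resolution block):
+ *
+ *     static void request_done_cb(struct evhttp_request *req, void *arg)
+ *     {
+ *         if (req)   // req may be NULL if the request failed
+ *             printf("status: %d\n", evhttp_request_get_response_code(req));
+ *     }
+ *
+ *     struct evhttp_connection *conn =
+ *         evhttp_connection_base_new(base, NULL, "www.example.com", 80);
+ *     struct evhttp_request *req = evhttp_request_new(request_done_cb, NULL);
+ *     evhttp_add_header(evhttp_request_get_output_headers(req),
+ *         "Host", "www.example.com");
+ *     evhttp_make_request(conn, req, EVHTTP_REQ_GET, "/");
+ *     event_base_dispatch(base);
+ */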
+
+/**
+ Cancels a pending HTTP request.
+
+ Cancels an ongoing HTTP request. The callback associated with this request
+ is not executed and the request object is freed. If the request is
+ currently being processed, i.e. it is ongoing, the corresponding
+ evhttp_connection object is going to get reset.
+
+ A request cannot be canceled if its callback has executed already. A request
+ may be canceled reentrantly from its chunked callback.
+
+ @param req the evhttp_request to cancel; req becomes invalid after this call.
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_cancel_request(struct evhttp_request *req);
+
+/**
+ * A structure to hold a parsed URI or Relative-Ref conforming to RFC3986.
+ */
+struct evhttp_uri;
+
+/** Returns the request URI */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_request_get_uri(const struct evhttp_request *req);
+/** Returns the request URI (parsed) */
+EVENT2_EXPORT_SYMBOL
+const struct evhttp_uri *evhttp_request_get_evhttp_uri(const struct evhttp_request *req);
+/** Returns the request command */
+EVENT2_EXPORT_SYMBOL
+enum evhttp_cmd_type evhttp_request_get_command(const struct evhttp_request *req);
+
+EVENT2_EXPORT_SYMBOL
+int evhttp_request_get_response_code(const struct evhttp_request *req);
+EVENT2_EXPORT_SYMBOL
+const char * evhttp_request_get_response_code_line(const struct evhttp_request *req);
+
+/** Returns the input headers */
+EVENT2_EXPORT_SYMBOL
+struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req);
+/** Returns the output headers */
+EVENT2_EXPORT_SYMBOL
+struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req);
+/** Returns the input buffer */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req);
+/** Returns the output buffer */
+EVENT2_EXPORT_SYMBOL
+struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req);
+/** Returns the host associated with the request. If a client sends an absolute
+ URI, the host part of that is preferred. Otherwise, the input headers are
+ searched for a Host: header. NULL is returned if no absolute URI or Host:
+ header is provided. */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_request_get_host(struct evhttp_request *req);
+
+/* Interfaces for dealing with HTTP headers */
+
+/**
+ Finds the value belonging to a header.
+
+ @param headers the evkeyvalq object in which to find the header
+ @param key the name of the header to find
+ @returns a pointer to the value for the header or NULL if the header
+ could not be found.
+ @see evhttp_add_header(), evhttp_remove_header()
+*/
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_find_header(const struct evkeyvalq *headers,
+ const char *key);
+
+/**
+ Removes a header from a list of existing headers.
+
+ @param headers the evkeyvalq object from which to remove a header
+ @param key the name of the header to remove
+ @returns 0 if the header was removed, -1 otherwise.
+ @see evhttp_find_header(), evhttp_add_header()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_remove_header(struct evkeyvalq *headers, const char *key);
+
+/**
+ Adds a header to a list of existing headers.
+
+ @param headers the evkeyvalq object to which to add a header
+ @param key the name of the header
+ @param value the value belonging to the header
+ @returns 0 on success, -1 otherwise.
+ @see evhttp_find_header(), evhttp_clear_headers()
+*/
+EVENT2_EXPORT_SYMBOL
+int evhttp_add_header(struct evkeyvalq *headers, const char *key, const char *value);
+
+/**
+ Removes all headers from the header list.
+
+ @param headers the evkeyvalq object from which to remove all headers
+*/
+EVENT2_EXPORT_SYMBOL
+void evhttp_clear_headers(struct evkeyvalq *headers);
+
+/* Miscellaneous utility functions */
+
+
+/**
+ Helper function to encode a string for inclusion in a URI. All
+ characters are replaced by their hex-escaped (%XX) equivalents,
+ except for characters explicitly unreserved by RFC3986 -- that is,
+ ASCII alphanumeric characters, hyphen, dot, underscore, and tilde.
+
+ The returned string must be freed by the caller.
+
+ @param str an unencoded string
+ @return a newly allocated URI-encoded string or NULL on failure
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_encode_uri(const char *str);
+
+/**
+ As evhttp_encode_uri, but if 'size' is nonnegative, treat the string
+ as being 'size' bytes long. This allows you to encode strings that
+ may contain 0-valued bytes.
+
+ The returned string must be freed by the caller.
+
+ @param str an unencoded string
+ @param size the length of the string to encode, or -1 if the string
+ is NUL-terminated
+ @param space_to_plus if true, space characters in 'str' are encoded
+ as +, not %20.
+ @return a newly allocated URI-encoded string, or NULL on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_uriencode(const char *str, ev_ssize_t size, int space_to_plus);
+
+/**
+ Helper function to sort of decode a URI-encoded string. Unlike
+ evhttp_get_decoded_uri, it decodes all plus characters that appear
+ _after_ the first question mark character, but no plusses that occur
+ before. This is not a good way to decode URIs in whole or in part.
+
+ The returned string must be freed by the caller
+
+ @deprecated This function is deprecated; you probably want to use
+ evhttp_get_decoded_uri instead.
+
+ @param uri an encoded URI
+ @return a newly allocated unencoded URI or NULL on failure
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_decode_uri(const char *uri);
+
+/**
+ Helper function to decode a URI-escaped string or HTTP parameter.
+
+ If 'decode_plus' is 1, then we decode the string as an HTTP parameter
+ value, and convert all plus ('+') characters to spaces. If
+ 'decode_plus' is 0, we leave all plus characters unchanged.
+
+ The returned string must be freed by the caller.
+
+ @param uri a URI-encoded string
+ @param decode_plus determines whether we convert '+' to space.
+ @param size_out if size_out is not NULL, *size_out is set to the size of the
+ returned string
+ @return a newly allocated unencoded URI or NULL on failure
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_uridecode(const char *uri, int decode_plus,
+ size_t *size_out);
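+
+/*
+ * Encode/decode sketch (the input string is an arbitrary example; both
+ * results are heap-allocated and must be freed by the caller):
+ *
+ *     char *enc = evhttp_uriencode("some thing", -1, 1); // "some+thing"
+ *     char *dec = evhttp_uridecode(enc, 1, NULL);        // "some thing"
+ *     free(enc);
+ *     free(dec);
+ */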
+
+/**
+ Helper function to parse out arguments in a query.
+
+ Parsing a URI like
+
+ http://foo.com/?q=test&s=some+thing
+
+ will result in two entries in the key value queue.
+
+ The first entry is: key="q", value="test"
+ The second entry is: key="s", value="some thing"
+
+ @deprecated This function is deprecated as of Libevent 2.0.9. Use
+ evhttp_uri_parse and evhttp_parse_query_str instead.
+
+ @param uri the request URI
+ @param headers the head of the evkeyval queue
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evhttp_parse_query(const char *uri, struct evkeyvalq *headers);
+
+/**
+ Helper function to parse out arguments from the query portion of an
+ HTTP URI.
+
+ Parsing a query string like
+
+ q=test&s=some+thing
+
+ will result in two entries in the key value queue.
+
+ The first entry is: key="q", value="test"
+ The second entry is: key="s", value="some thing"
+
+ @param query_parse the query portion of the URI
+ @param headers the head of the evkeyval queue
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evhttp_parse_query_str(const char *uri, struct evkeyvalq *headers);
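+
+/*
+ * Query-parsing sketch (the query string is an arbitrary example;
+ * <event2/keyvalq_struct.h> is assumed for the evkeyvalq definition):
+ *
+ *     struct evkeyvalq params;
+ *     if (evhttp_parse_query_str("q=test&s=some+thing", &params) == 0) {
+ *         const char *q = evhttp_find_header(&params, "q"); // "test"
+ *         const char *s = evhttp_find_header(&params, "s"); // "some thing"
+ *         evhttp_clear_headers(&params);
+ *     }
+ */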
+
+/**
+ * Escape HTML character entities in a string.
+ *
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp; correspondingly.
+ *
+ * The returned string needs to be freed by the caller.
+ *
+ * @param html an unescaped HTML string
+ * @return an escaped HTML string or NULL on error
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_htmlescape(const char *html);
+
+/**
+ * Return a new empty evhttp_uri with no fields set.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_uri *evhttp_uri_new(void);
+
+/**
+ * Changes the flags set on a given URI. See EVHTTP_URI_* for
+ * a list of flags.
+ **/
+EVENT2_EXPORT_SYMBOL
+void evhttp_uri_set_flags(struct evhttp_uri *uri, unsigned flags);
+
+/** Return the scheme of an evhttp_uri, or NULL if no scheme has
+ * been set and the evhttp_uri contains a Relative-Ref. */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_scheme(const struct evhttp_uri *uri);
+/**
+ * Return the userinfo part of an evhttp_uri, or NULL if it has no userinfo
+ * set.
+ */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_userinfo(const struct evhttp_uri *uri);
+/**
+ * Return the host part of an evhttp_uri, or NULL if it has no host set.
+ * The host may either be a regular hostname (conforming to the RFC 3986
+ * "regname" production), or an IPv4 address, or the empty string, or a
+ * bracketed IPv6 address, or a bracketed 'IP-Future' address.
+ *
+ * Note that having a NULL host means that the URI has no authority
+ * section, but having an empty-string host means that the URI has an
+ * authority section with no host part. For example,
+ * "mailto:user@example.com" has a host of NULL, but "file:///etc/motd"
+ * has a host of "".
+ */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_host(const struct evhttp_uri *uri);
+/** Return the port part of an evhttp_uri, or -1 if there is no port set. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_get_port(const struct evhttp_uri *uri);
+/** Return the path part of an evhttp_uri, or NULL if it has no path set */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_path(const struct evhttp_uri *uri);
+/** Return the query part of an evhttp_uri (excluding the leading "?"), or
+ * NULL if it has no query set */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_query(const struct evhttp_uri *uri);
+/** Return the fragment part of an evhttp_uri (excluding the leading "#"),
+ * or NULL if it has no fragment set */
+EVENT2_EXPORT_SYMBOL
+const char *evhttp_uri_get_fragment(const struct evhttp_uri *uri);
+
+/** Set the scheme of an evhttp_uri, or clear the scheme if scheme==NULL.
+ * Returns 0 on success, -1 if scheme is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme);
+/** Set the userinfo of an evhttp_uri, or clear the userinfo if userinfo==NULL.
+ * Returns 0 on success, -1 if userinfo is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo);
+/** Set the host of an evhttp_uri, or clear the host if host==NULL.
+ * Returns 0 on success, -1 if host is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_host(struct evhttp_uri *uri, const char *host);
+/** Set the port of an evhttp_uri, or clear the port if port==-1.
+ * Returns 0 on success, -1 if port is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_port(struct evhttp_uri *uri, int port);
+/** Set the path of an evhttp_uri, or clear the path if path==NULL.
+ * Returns 0 on success, -1 if path is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_path(struct evhttp_uri *uri, const char *path);
+/** Set the query of an evhttp_uri, or clear the query if query==NULL.
+ * The query should not include a leading "?".
+ * Returns 0 on success, -1 if query is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_query(struct evhttp_uri *uri, const char *query);
+/** Set the fragment of an evhttp_uri, or clear the fragment if fragment==NULL.
+ * The fragment should not include a leading "#".
+ * Returns 0 on success, -1 if fragment is not well-formed. */
+EVENT2_EXPORT_SYMBOL
+int evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment);
+
+/**
+ * Helper function to parse a URI-Reference as specified by RFC3986.
+ *
+ * This function matches the URI-Reference production from RFC3986,
+ * which includes both URIs like
+ *
+ * scheme://[[userinfo]@]foo.com[:port]]/[path][?query][#fragment]
+ *
+ * and relative-refs like
+ *
+ * [path][?query][#fragment]
+ *
+ * Any optional elements not present in the original URI are
+ * left set to NULL in the resulting evhttp_uri. If no port is
+ * specified, the port is set to -1.
+ *
+ * Note that no decoding is performed on percent-escaped characters in
+ * the string; if you want to parse them, use evhttp_uridecode or
+ * evhttp_parse_query_str as appropriate.
+ *
+ * Note also that most URI schemes will have additional constraints that
+ * this function does not know about, and cannot check. For example,
+ * mailto://www.example.com/cgi-bin/fortune.pl is not a reasonable
+ * mailto url, http://www.example.com:99999/ is not a reasonable HTTP
+ * URL, and ftp:username@example.com is not a reasonable FTP URL.
+ * Nevertheless, all of these URLs conform to RFC3986, and this function
+ * accepts all of them as valid.
+ *
+ * @param source_uri the request URI
+ * @param flags Zero or more EVHTTP_URI_* flags to affect the behavior
+ * of the parser.
+ * @return a uri container holding the parsed data, or NULL on error
+ * @see evhttp_uri_free()
+ */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_uri *evhttp_uri_parse_with_flags(const char *source_uri,
+ unsigned flags);
+
+/** Tolerate URIs that do not conform to RFC3986.
+ *
+ * Unfortunately, some HTTP clients generate URIs that, according to RFC3986,
+ * are not conformant URIs. If you need to support these URIs, you can
+ * do so by passing this flag to evhttp_uri_parse_with_flags.
+ *
+ * Currently, these changes are:
+ * <ul>
+ * <li> Nonconformant URIs are allowed to contain otherwise unreasonable
+ * characters in their path, query, and fragment components.
+ * </ul>
+ */
+#define EVHTTP_URI_NONCONFORMANT 0x01
+
+/** Alias for evhttp_uri_parse_with_flags(source_uri, 0) */
+EVENT2_EXPORT_SYMBOL
+struct evhttp_uri *evhttp_uri_parse(const char *source_uri);
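+
+/*
+ * URI-parsing sketch (the URL is an arbitrary example):
+ *
+ *     struct evhttp_uri *uri =
+ *         evhttp_uri_parse("http://user@www.example.com:8080/p?q=1#frag");
+ *     if (uri) {
+ *         const char *host = evhttp_uri_get_host(uri); // "www.example.com"
+ *         int port = evhttp_uri_get_port(uri);         // 8080
+ *         const char *path = evhttp_uri_get_path(uri); // "/p"
+ *         evhttp_uri_free(uri);
+ *     }
+ */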
+
+/**
+ * Free all memory allocated for a parsed uri. Only use this for URIs
+ * generated by evhttp_uri_parse.
+ *
+ * @param uri container with parsed data
+ * @see evhttp_uri_parse()
+ */
+EVENT2_EXPORT_SYMBOL
+void evhttp_uri_free(struct evhttp_uri *uri);
+
+/**
+ * Join together the uri parts from parsed data to form a URI-Reference.
+ *
+ * Note that no escaping of reserved characters is done on the members
+ * of the evhttp_uri, so the generated string might not be a valid URI
+ * unless the members of evhttp_uri are themselves valid.
+ *
+ * @param uri container with parsed data
+ * @param buf destination buffer
+ * @param limit destination buffer size
+ * @return the joined URI as a string, or NULL on error
+ * @see evhttp_uri_parse()
+ */
+EVENT2_EXPORT_SYMBOL
+char *evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_HTTP_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/http_compat.h b/libs/libevent/include/event2/http_compat.h
new file mode 100644
index 0000000000..0d9af17f3f
--- /dev/null
+++ b/libs/libevent/include/event2/http_compat.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_HTTP_COMPAT_H_INCLUDED_
+#define EVENT2_HTTP_COMPAT_H_INCLUDED_
+
+/** @file event2/http_compat.h
+
+ Potentially non-threadsafe versions of the functions in http.h: provided
+ only for backwards compatibility.
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/**
+ * Start an HTTP server on the specified address and port
+ *
+ * @deprecated It does not allow an event base to be specified
+ *
+ * @param address the address to which the HTTP server should be bound
+ * @param port the port number on which the HTTP server should listen
+ * @return a struct evhttp object
+ */
+struct evhttp *evhttp_start(const char *address, unsigned short port);
+
+/**
+ * A connection object that can be used for making HTTP requests. The
+ * connection object tries to establish the connection when it is given an
+ * http request object.
+ *
+ * @deprecated It does not allow an event base to be specified
+ */
+struct evhttp_connection *evhttp_connection_new(
+ const char *address, unsigned short port);
+
+/**
+ * Associates an event base with the connection - can only be called
+ * on a freshly created connection object that has not been used yet.
+ *
+ * @deprecated New code should create the connection with an event base
+ * already attached rather than setting one afterwards
+ */
+void evhttp_connection_set_base(struct evhttp_connection *evcon,
+ struct event_base *base);
+
+
+/** Returns the request URI */
+#define evhttp_request_uri evhttp_request_get_uri
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_HTTP_COMPAT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/http_struct.h b/libs/libevent/include/event2/http_struct.h
new file mode 100644
index 0000000000..4bf5b1ff60
--- /dev/null
+++ b/libs/libevent/include/event2/http_struct.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_HTTP_STRUCT_H_INCLUDED_
+#define EVENT2_HTTP_STRUCT_H_INCLUDED_
+
+/** @file event2/http_struct.h
+
+ Data structures for http. Using these structures may hurt forward
+ compatibility with later versions of Libevent: be careful!
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+/**
+ * the request structure that a server receives.
+ * WARNING: expect this structure to change. I will try to provide
+ * reasonable accessors.
+ */
+struct evhttp_request {
+#if defined(TAILQ_ENTRY)
+ TAILQ_ENTRY(evhttp_request) next;
+#else
+struct {
+ struct evhttp_request *tqe_next;
+ struct evhttp_request **tqe_prev;
+} next;
+#endif
+
+ /* the connection object that this request belongs to */
+ struct evhttp_connection *evcon;
+ int flags;
+/** The request obj owns the evhttp connection and needs to free it */
+#define EVHTTP_REQ_OWN_CONNECTION 0x0001
+/** Request was made via a proxy */
+#define EVHTTP_PROXY_REQUEST 0x0002
+/** The request object is owned by the user; the user must free it */
+#define EVHTTP_USER_OWNED 0x0004
+/** The request will be used again upstack; freeing must be deferred */
+#define EVHTTP_REQ_DEFER_FREE 0x0008
+/** The request should be freed upstack */
+#define EVHTTP_REQ_NEEDS_FREE 0x0010
+
+ struct evkeyvalq *input_headers;
+ struct evkeyvalq *output_headers;
+
+ /* address of the remote host and the port connection came from */
+ char *remote_host;
+ ev_uint16_t remote_port;
+
+ /* cache of the hostname for evhttp_request_get_host */
+ char *host_cache;
+
+ enum evhttp_request_kind kind;
+ enum evhttp_cmd_type type;
+
+ size_t headers_size;
+ size_t body_size;
+
+ char *uri; /* uri after HTTP request was parsed */
+ struct evhttp_uri *uri_elems; /* uri elements */
+
+ char major; /* HTTP Major number */
+ char minor; /* HTTP Minor number */
+
+ int response_code; /* HTTP Response code */
+ char *response_code_line; /* Readable response */
+
+ struct evbuffer *input_buffer; /* read data */
+ ev_int64_t ntoread;
+ unsigned chunked:1, /* a chunked request */
+ userdone:1; /* the user has sent all data */
+
+ struct evbuffer *output_buffer; /* outgoing post or data */
+
+ /* Callback */
+ void (*cb)(struct evhttp_request *, void *);
+ void *cb_arg;
+
+ /*
+	 * Chunked data callback - called for each completed chunk if
+ * specified. If not specified, all the data is delivered via
+ * the regular callback.
+ */
+ void (*chunk_cb)(struct evhttp_request *, void *);
+
+ /*
+ * Callback added for forked-daapd so they can collect ICY
+	 * (shoutcast) metadata from the http header. If the returned
+	 * int is negative, the connection will be closed.
+ */
+ int (*header_cb)(struct evhttp_request *, void *);
+
+ /*
+	 * Error callback - called when an error occurs.
+ * @see evhttp_request_error for error types.
+ *
+ * @see evhttp_request_set_error_cb()
+ */
+ void (*error_cb)(enum evhttp_request_error, void *);
+
+ /*
+ * Send complete callback - called when the request is actually
+ * sent and completed.
+ */
+ void (*on_complete_cb)(struct evhttp_request *, void *);
+ void *on_complete_cb_arg;
+};
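+
+/*
+ * Rather than touching these fields directly, prefer the accessors declared
+ * in event2/http.h.  A minimal sketch of a request callback doing so
+ * (my_request_cb is a placeholder name):
+ *
+ *	static void my_request_cb(struct evhttp_request *req, void *arg)
+ *	{
+ *		const char *uri = evhttp_request_get_uri(req);
+ *		struct evkeyvalq *headers = evhttp_request_get_input_headers(req);
+ *		struct evbuffer *body = evhttp_request_get_input_buffer(req);
+ *		evhttp_send_reply(req, 200, "OK", NULL);
+ *	}
+ */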
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_HTTP_STRUCT_H_INCLUDED_ */
+
diff --git a/libs/libevent/include/event2/keyvalq_struct.h b/libs/libevent/include/event2/keyvalq_struct.h
new file mode 100644
index 0000000000..bffa54b3a7
--- /dev/null
+++ b/libs/libevent/include/event2/keyvalq_struct.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_KEYVALQ_STRUCT_H_INCLUDED_
+#define EVENT2_KEYVALQ_STRUCT_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Fix so that people don't have to run with <sys/queue.h> */
+/* XXXX This code is duplicated with event_struct.h */
+#ifndef TAILQ_ENTRY
+#define EVENT_DEFINED_TQENTRY_
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+#endif /* !TAILQ_ENTRY */
+
+#ifndef TAILQ_HEAD
+#define EVENT_DEFINED_TQHEAD_
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; \
+ struct type **tqh_last; \
+}
+#endif
+
+/*
+ * Key-Value pairs. Can be used for HTTP headers but also for
+ * query argument parsing.
+ */
+struct evkeyval {
+ TAILQ_ENTRY(evkeyval) next;
+
+ char *key;
+ char *value;
+};
+
+TAILQ_HEAD (evkeyvalq, evkeyval);
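+
+/*
+ * A minimal sketch of walking an evkeyvalq (for example, the header list
+ * returned by evhttp_request_get_input_headers()).  TAILQ_FOREACH comes from
+ * <sys/queue.h> or an equivalent; "headers" is assumed to be a populated
+ * struct evkeyvalq *.
+ *
+ *	struct evkeyval *kv;
+ *	TAILQ_FOREACH(kv, headers, next)
+ *		printf("%s: %s\n", kv->key, kv->value);
+ *
+ * For single lookups, evhttp_find_header(headers, "Host") avoids walking
+ * the list by hand.
+ */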
+
+/* XXXX This code is duplicated with event_struct.h */
+#ifdef EVENT_DEFINED_TQENTRY_
+#undef TAILQ_ENTRY
+#endif
+
+#ifdef EVENT_DEFINED_TQHEAD_
+#undef TAILQ_HEAD
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/include/event2/listener.h b/libs/libevent/include/event2/listener.h
new file mode 100644
index 0000000000..84b4da055d
--- /dev/null
+++ b/libs/libevent/include/event2/listener.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_LISTENER_H_INCLUDED_
+#define EVENT2_LISTENER_H_INCLUDED_
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event.h>
+
+struct sockaddr;
+struct evconnlistener;
+
+/**
+ A callback that we invoke when a listener has a new connection.
+
+ @param listener The evconnlistener
+ @param fd The new file descriptor
+ @param addr The source address of the connection
+ @param socklen The length of addr
+ @param user_arg the pointer passed to evconnlistener_new()
+ */
+typedef void (*evconnlistener_cb)(struct evconnlistener *, evutil_socket_t, struct sockaddr *, int socklen, void *);
+
+/**
+ A callback that we invoke when a listener encounters a non-retriable error.
+
+ @param listener The evconnlistener
+ @param user_arg the pointer passed to evconnlistener_new()
+ */
+typedef void (*evconnlistener_errorcb)(struct evconnlistener *, void *);
+
+/** Flag: Indicates that we should not make incoming sockets nonblocking
+ * before passing them to the callback. */
+#define LEV_OPT_LEAVE_SOCKETS_BLOCKING (1u<<0)
+/** Flag: Indicates that freeing the listener should close the underlying
+ * socket. */
+#define LEV_OPT_CLOSE_ON_FREE (1u<<1)
+/** Flag: Indicates that we should set the close-on-exec flag, if possible */
+#define LEV_OPT_CLOSE_ON_EXEC (1u<<2)
+/** Flag: Indicates that we should disable the timeout (if any) between when
+ * this socket is closed and when we can listen again on the same port. */
+#define LEV_OPT_REUSEABLE (1u<<3)
+/** Flag: Indicates that the listener should be locked so it's safe to use
+ * from multiple threads at once. */
+#define LEV_OPT_THREADSAFE (1u<<4)
+/** Flag: Indicates that the listener should be created in disabled
+ * state. Use evconnlistener_enable() to enable it later. */
+#define LEV_OPT_DISABLED (1u<<5)
+/** Flag: Indicates that the listener should defer accept() until data is
+ * available, if possible. Ignored on platforms that do not support this.
+ *
+ * This option can help performance for protocols where the client transmits
+ * immediately after connecting. Do not use this option if your protocol
+ * _doesn't_ start out with the client transmitting data, since in that case
+ * this option will sometimes cause the kernel to never tell you about the
+ * connection.
+ *
+ * This option is only supported by evconnlistener_new_bind(): it can't
+ * work with evconnlistener_new(), since the listener needs to be told
+ * to use the option before it is actually bound.
+ */
+#define LEV_OPT_DEFERRED_ACCEPT (1u<<6)
+/** Flag: Indicates that we ask to allow multiple servers (processes or
+ * threads) to bind to the same port if they each set the option.
+ *
+ * SO_REUSEPORT is what most people would expect SO_REUSEADDR to be; however,
+ * SO_REUSEPORT does not imply SO_REUSEADDR.
+ *
+ * This is only available on Linux with kernel 3.9 or newer
+ */
+#define LEV_OPT_REUSEABLE_PORT (1u<<7)
+
+/**
+ Allocate a new evconnlistener object to listen for incoming TCP connections
+ on a given file descriptor.
+
+ @param base The event base to associate the listener with.
+ @param cb A callback to be invoked when a new connection arrives. If the
+ callback is NULL, the listener will be treated as disabled until the
+ callback is set.
+ @param ptr A user-supplied pointer to give to the callback.
+ @param flags Any number of LEV_OPT_* flags
+ @param backlog Passed to the listen() call to determine the length of the
+ acceptable connection backlog. Set to -1 for a reasonable default.
+ Set to 0 if the socket is already listening.
+ @param fd The file descriptor to listen on. It must be a nonblocking
+ file descriptor, and it should already be bound to an appropriate
+ port and address.
+*/
+EVENT2_EXPORT_SYMBOL
+struct evconnlistener *evconnlistener_new(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd);
+/**
+ Allocate a new evconnlistener object to listen for incoming TCP connections
+ on a given address.
+
+ @param base The event base to associate the listener with.
+ @param cb A callback to be invoked when a new connection arrives. If the
+ callback is NULL, the listener will be treated as disabled until the
+ callback is set.
+ @param ptr A user-supplied pointer to give to the callback.
+ @param flags Any number of LEV_OPT_* flags
+ @param backlog Passed to the listen() call to determine the length of the
+ acceptable connection backlog. Set to -1 for a reasonable default.
+ @param addr The address to listen for connections on.
+ @param socklen The length of the address.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evconnlistener *evconnlistener_new_bind(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ const struct sockaddr *sa, int socklen);
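+
+/*
+ * A minimal sketch of binding a listener to 0.0.0.0:9995 (the port number
+ * and the accept_cb callback name are illustrative):
+ *
+ *	struct sockaddr_in sin;
+ *	memset(&sin, 0, sizeof(sin));
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ *	sin.sin_port = htons(9995);
+ *
+ *	struct evconnlistener *lev = evconnlistener_new_bind(base, accept_cb,
+ *	    NULL, LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
+ *	    (struct sockaddr *)&sin, sizeof(sin));
+ */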
+/**
+ Disable and deallocate an evconnlistener.
+ */
+EVENT2_EXPORT_SYMBOL
+void evconnlistener_free(struct evconnlistener *lev);
+/**
+ Re-enable an evconnlistener that has been disabled.
+ */
+EVENT2_EXPORT_SYMBOL
+int evconnlistener_enable(struct evconnlistener *lev);
+/**
+ Stop listening for connections on an evconnlistener.
+ */
+EVENT2_EXPORT_SYMBOL
+int evconnlistener_disable(struct evconnlistener *lev);
+
+/** Return an evconnlistener's associated event_base. */
+EVENT2_EXPORT_SYMBOL
+struct event_base *evconnlistener_get_base(struct evconnlistener *lev);
+
+/** Return the socket that an evconnlistener is listening on. */
+EVENT2_EXPORT_SYMBOL
+evutil_socket_t evconnlistener_get_fd(struct evconnlistener *lev);
+
+/** Change the callback on the listener to cb and its user_data to arg.
+ */
+EVENT2_EXPORT_SYMBOL
+void evconnlistener_set_cb(struct evconnlistener *lev,
+ evconnlistener_cb cb, void *arg);
+
+/** Set an evconnlistener's error callback. */
+EVENT2_EXPORT_SYMBOL
+void evconnlistener_set_error_cb(struct evconnlistener *lev,
+ evconnlistener_errorcb errorcb);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/include/event2/rpc.h b/libs/libevent/include/event2/rpc.h
new file mode 100644
index 0000000000..dd43df266a
--- /dev/null
+++ b/libs/libevent/include/event2/rpc.h
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_RPC_H_INCLUDED_
+#define EVENT2_RPC_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file event2/rpc.h
+ *
+ * This header file provides basic support for an RPC server and client.
+ *
+ * To support RPCs in a server, every supported RPC command needs to be
+ * defined and registered.
+ *
+ * EVRPC_HEADER(SendCommand, Request, Reply);
+ *
+ * SendCommand is the name of the RPC command.
+ * Request is the name of a structure generated by event_rpcgen.py.
+ * It contains all parameters relating to the SendCommand RPC. The
+ * server needs to fill in the Reply structure.
+ * Reply is the name of a structure generated by event_rpcgen.py. It
+ * contains the answer to the RPC.
+ *
+ * To register an RPC with an HTTP server, you need to first create an RPC
+ * base with:
+ *
+ * struct evrpc_base *base = evrpc_init(http);
+ *
+ * A specific RPC can then be registered with
+ *
+ * EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
+ *
+ * when the server receives an appropriately formatted RPC, the user callback
+ * is invoked. The callback needs to fill in the reply structure.
+ *
+ * void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
+ *
+ * To send the reply, call EVRPC_REQUEST_DONE(rpc);
+ *
+ * See the regression test for an example.
+ */
+
+/**
+ Determines if the member has been set in the message
+
+ @param msg the message to inspect
+ @param member the member variable to test for presence
+ @return 1 if it's present or 0 otherwise.
+*/
+#define EVTAG_HAS(msg, member) \
+ ((msg)->member##_set == 1)
+
+#ifndef EVENT2_RPC_COMPAT_H_INCLUDED_
+
+/**
+ Assigns a value to the member in the message.
+
+ @param msg the message to which to assign a value
+ @param member the name of the member variable
+ @param value the value to assign
+*/
+#define EVTAG_ASSIGN(msg, member, value) \
+ (*(msg)->base->member##_assign)((msg), (value))
+/**
+ Assigns a value to the member in the message.
+
+ @param msg the message to which to assign a value
+ @param member the name of the member variable
+ @param value the value to assign
+ @param len the length of the value
+*/
+#define EVTAG_ASSIGN_WITH_LEN(msg, member, value, len) \
+ (*(msg)->base->member##_assign)((msg), (value), (len))
+/**
+ Returns the value for a member.
+
+ @param msg the message from which to get the value
+ @param member the name of the member variable
+ @param pvalue a pointer to the variable to hold the value
+ @return 0 on success, -1 otherwise.
+*/
+#define EVTAG_GET(msg, member, pvalue) \
+ (*(msg)->base->member##_get)((msg), (pvalue))
+/**
+ Returns the value for a member.
+
+ @param msg the message from which to get the value
+ @param member the name of the member variable
+ @param pvalue a pointer to the variable to hold the value
+ @param plen a pointer to the length of the value
+ @return 0 on success, -1 otherwise.
+*/
+#define EVTAG_GET_WITH_LEN(msg, member, pvalue, plen) \
+ (*(msg)->base->member##_get)((msg), (pvalue), (plen))
+
+#endif /* EVENT2_RPC_COMPAT_H_INCLUDED_ */
+
+/**
+ Adds a value to an array.
+*/
+#define EVTAG_ARRAY_ADD_VALUE(msg, member, value) \
+ (*(msg)->base->member##_add)((msg), (value))
+/**
+ Allocates a new entry in the array and returns it.
+*/
+#define EVTAG_ARRAY_ADD(msg, member) \
+ (*(msg)->base->member##_add)(msg)
+/**
+ Gets a variable at the specified offset from the array.
+*/
+#define EVTAG_ARRAY_GET(msg, member, offset, pvalue) \
+ (*(msg)->base->member##_get)((msg), (offset), (pvalue))
+/**
+ Returns the number of entries in the array.
+*/
+#define EVTAG_ARRAY_LEN(msg, member) ((msg)->member##_length)
+
+
+struct evbuffer;
+struct event_base;
+struct evrpc_req_generic;
+struct evrpc_request_wrapper;
+struct evrpc;
+
+/** The type of a specific RPC Message
+ *
+ * @param rpcname the name of the RPC message
+ */
+#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
+
+struct evhttp_request;
+struct evrpc_status;
+struct evrpc_hook_meta;
+
+/** Creates the definitions and prototypes for an RPC
+ *
+ * You need to use EVRPC_HEADER to create structures and function prototypes
+ * needed by the server and client implementation. The structures have to be
+ * defined in an .rpc file and converted to source code via event_rpcgen.py
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param rplystruct the name of the RPC reply structure
+ * @see EVRPC_GENERATE()
+ */
+#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
+EVRPC_STRUCT(rpcname) { \
+ struct evrpc_hook_meta *hook_meta; \
+ struct reqstruct* request; \
+ struct rplystruct* reply; \
+ struct evrpc* rpc; \
+ struct evhttp_request* http_req; \
+ struct evbuffer* rpc_data; \
+}; \
+int evrpc_send_request_##rpcname(struct evrpc_pool *, \
+ struct reqstruct *, struct rplystruct *, \
+ void (*)(struct evrpc_status *, \
+ struct reqstruct *, struct rplystruct *, void *cbarg), \
+ void *);
+
+struct evrpc_pool;
+
+/** use EVRPC_GENERATE instead */
+struct evrpc_request_wrapper *evrpc_make_request_ctx(
+ struct evrpc_pool *pool, void *request, void *reply,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer*, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *),
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cbarg);
+
+/** Creates a context structure that contains rpc specific information.
+ *
+ * EVRPC_MAKE_CTX is used to populate an RPC-specific context that
+ * contains information about marshaling the RPC data types.
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param rplystruct the name of the RPC reply structure
+ * @param pool the evrpc_pool over which to make the request
+ * @param request a pointer to the RPC request structure object
+ * @param reply a pointer to the RPC reply structure object
+ * @param cb the callback function to call when the RPC has completed
+ * @param cbarg the argument to supply to the callback
+ */
+#define EVRPC_MAKE_CTX(rpcname, reqstruct, rplystruct, \
+ pool, request, reply, cb, cbarg) \
+ evrpc_make_request_ctx(pool, request, reply, \
+ #rpcname, \
+ (void (*)(struct evbuffer *, void *))reqstruct##_marshal, \
+ (void (*)(void *))rplystruct##_clear, \
+ (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal, \
+ (void (*)(struct evrpc_status *, void *, void *, void *))cb, \
+ cbarg)
+
+/** Generates the code for receiving and sending an RPC message
+ *
+ * EVRPC_GENERATE is used to create the code corresponding to sending
+ * and receiving a particular RPC message
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param rplystruct the name of the RPC reply structure
+ * @see EVRPC_HEADER()
+ */
+#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
+ int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
+ struct reqstruct *request, struct rplystruct *reply, \
+ void (*cb)(struct evrpc_status *, \
+ struct reqstruct *, struct rplystruct *, void *cbarg), \
+ void *cbarg) { \
+ return evrpc_send_request_generic(pool, request, reply, \
+ (void (*)(struct evrpc_status *, void *, void *, void *))cb, \
+ cbarg, \
+ #rpcname, \
+ (void (*)(struct evbuffer *, void *))reqstruct##_marshal, \
+ (void (*)(void *))rplystruct##_clear, \
+ (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal); \
+}
+
+/** Provides access to the HTTP request object underlying an RPC
+ *
+ * Access to the underlying http object; can be used to look at headers or
+ * for getting the remote ip address
+ *
+ * @param rpc_req the rpc request structure provided to the server callback
+ * @return a struct evhttp_request object that can be inspected for
+ * HTTP headers or sender information.
+ */
+#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
+
+/** completes the server response to an rpc request */
+void evrpc_request_done(struct evrpc_req_generic *req);
+
+/** accessors for request and reply */
+void *evrpc_get_request(struct evrpc_req_generic *req);
+void *evrpc_get_reply(struct evrpc_req_generic *req);
+
+/** Creates the reply to an RPC request
+ *
+ * EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
+ * to have been filled in. The request and reply pointers become invalid
+ * after this call has finished.
+ *
+ * @param rpc_req the rpc request structure provided to the server callback
+ */
+#define EVRPC_REQUEST_DONE(rpc_req) do { \
+ struct evrpc_req_generic *req_ = (struct evrpc_req_generic *)(rpc_req); \
+ evrpc_request_done(req_); \
+} while (0)
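+
+/*
+ * A sketch of a server callback for the SendCommand RPC from the example at
+ * the top of this header (FunctionCB and the Reply type come from that
+ * example; how the reply is filled in depends on the generated structure):
+ *
+ *	void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg)
+ *	{
+ *		struct Reply *reply = rpc->reply;
+ *		... fill in reply, e.g. via the EVTAG_ASSIGN() macros ...
+ *		EVRPC_REQUEST_DONE(rpc);
+ *	}
+ */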
+
+
+struct evrpc_base;
+struct evhttp;
+
+/* functions to start up the rpc system */
+
+/** Creates a new rpc base from which RPC requests can be received
+ *
+ * @param server a pointer to an existing HTTP server
+ * @return a newly allocated evrpc_base struct
+ * @see evrpc_free()
+ */
+struct evrpc_base *evrpc_init(struct evhttp *server);
+
+/**
+ * Frees the evrpc base
+ *
+ * For now, you are responsible for making sure that no rpcs are ongoing.
+ *
+ * @param base the evrpc_base object to be freed
+ * @see evrpc_init
+ */
+void evrpc_free(struct evrpc_base *base);
+
+/** register RPCs with the HTTP Server
+ *
+ * registers a new RPC with the HTTP server; each RPC needs to have
+ * a unique name under which it can be identified.
+ *
+ * @param base the evrpc_base structure in which the RPC should be
+ * registered.
+ * @param name the name of the RPC
+ * @param request the name of the RPC request structure
+ * @param reply the name of the RPC reply structure
+ * @param callback the callback that should be invoked when the RPC
+ * is received. The callback has the following prototype
+ * void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
+ * @param cbarg an additional parameter that can be passed to the callback.
+ * The parameter can be used to carry around state.
+ */
+#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
+ evrpc_register_generic(base, #name, \
+ (void (*)(struct evrpc_req_generic *, void *))callback, cbarg, \
+ (void *(*)(void *))request##_new, NULL, \
+ (void (*)(void *))request##_free, \
+ (int (*)(void *, struct evbuffer *))request##_unmarshal, \
+ (void *(*)(void *))reply##_new, NULL, \
+ (void (*)(void *))reply##_free, \
+ (int (*)(void *))reply##_complete, \
+ (void (*)(struct evbuffer *, void *))reply##_marshal)
+
+/**
+ Low level function for registering an RPC with a server.
+
+ Use EVRPC_REGISTER() instead.
+
+ @see EVRPC_REGISTER()
+*/
+int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
+ void (*)(struct evrpc_req_generic*, void *), void *);
+
+/**
+ * Unregisters an already registered RPC
+ *
+ * @param base the evrpc_base object from which to unregister an RPC
+ * @param name the name of the rpc to unregister
+ * @return -1 on error or 0 when successful.
+ * @see EVRPC_REGISTER()
+ */
+#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc((base), #name)
+
+int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
+
+/*
+ * Client-side RPC support
+ */
+
+struct evhttp_connection;
+struct evrpc_status;
+
+/** launches an RPC and sends it to the server
+ *
+ * EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
+ *
+ * @param name the name of the RPC
+ * @param pool the evrpc_pool that contains the connection objects over which
+ * the request should be sent.
+ * @param request a pointer to the RPC request structure - it contains the
+ * data to be sent to the server.
+ * @param reply a pointer to the RPC reply structure. It is going to be filled
+ * if the request was answered successfully
+ * @param cb the callback to invoke when the RPC request has been answered
+ * @param cbarg an additional argument to be passed to the client
+ * @return 0 on success, -1 on failure
+ */
+#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \
+ evrpc_send_request_##name((pool), (request), (reply), (cb), (cbarg))
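+
+/*
+ * A client-side sketch using the SendCommand/Request/Reply names from the
+ * example at the top of this header.  Request_new()/Reply_new() are the
+ * constructors emitted by event_rpcgen.py, and send_done_cb is a user
+ * callback matching the signature declared by EVRPC_HEADER() above.
+ *
+ *	struct evrpc_pool *pool = evrpc_pool_new(base);
+ *	evrpc_pool_add_connection(pool,
+ *	    evhttp_connection_base_new(base, NULL, "127.0.0.1", 8080));
+ *
+ *	struct Request *req = Request_new();
+ *	struct Reply *rpl = Reply_new();
+ *	EVRPC_MAKE_REQUEST(SendCommand, pool, req, rpl, send_done_cb, NULL);
+ */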
+
+/**
+ Makes an RPC request based on the provided context.
+
+ This is a low-level function and should not be used directly
+ unless a custom context object is provided. Use EVRPC_MAKE_REQUEST()
+ instead.
+
+ @param ctx a context from EVRPC_MAKE_CTX()
+ @returns 0 on success, -1 otherwise.
+ @see EVRPC_MAKE_REQUEST(), EVRPC_MAKE_CTX()
+*/
+int evrpc_make_request(struct evrpc_request_wrapper *ctx);
+
+/** creates an rpc connection pool
+ *
+ * a pool has a number of connections associated with it.
+ * rpc requests are always made via a pool.
+ *
+ * @param base a pointer to a struct event_base object; can be left NULL
+ * in single-threaded applications
+ * @return a newly allocated struct evrpc_pool object
+ * @see evrpc_pool_free()
+ */
+struct evrpc_pool *evrpc_pool_new(struct event_base *base);
+/** frees an rpc connection pool
+ *
+ * @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
+ * @see evrpc_pool_new()
+ */
+void evrpc_pool_free(struct evrpc_pool *pool);
+
+/**
+ * Adds a connection over which rpc can be dispatched to the pool.
+ *
+ * The connection object must have been newly created.
+ *
+ * @param pool the pool to which to add the connection
+ * @param evcon the connection to add to the pool.
+ */
+void evrpc_pool_add_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *evcon);
+
+/**
+ * Removes a connection from the pool.
+ *
+ * The connection object must have been newly created.
+ *
+ * @param pool the pool from which to remove the connection
+ * @param evcon the connection to remove from the pool.
+ */
+void evrpc_pool_remove_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *evcon);
+
+/**
+ * Sets the timeout in seconds after which a request has to complete. The
+ * RPC is completely aborted if it does not complete by then. Setting
+ * the timeout to 0 means that it never times out and can be used to
+ * implement callback type RPCs.
+ *
+ * Any connection already in the pool will be updated with the new
+ * timeout. Connections added to the pool after set_timeout has been
+ * called receive the pool timeout only if no timeout has been set
+ * for the connection itself.
+ *
+ * @param pool a pointer to a struct evrpc_pool object
+ * @param timeout_in_secs the number of seconds after which a request should
+ * timeout and a failure be returned to the callback.
+ */
+void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
+
+/**
+ * Hooks for changing the input and output of RPCs; this can be used to
+ * implement compression, authentication, encryption, ...
+ */
+
+enum EVRPC_HOOK_TYPE {
+ EVRPC_INPUT, /**< apply the function to an input hook */
+ EVRPC_OUTPUT /**< apply the function to an output hook */
+};
+
+#ifndef _WIN32
+/** Deprecated alias for EVRPC_INPUT. Not available on windows, where it
+ * conflicts with platform headers. */
+#define INPUT EVRPC_INPUT
+/** Deprecated alias for EVRPC_OUTPUT. Not available on windows, where it
+ * conflicts with platform headers. */
+#define OUTPUT EVRPC_OUTPUT
+#endif
+
+/**
+ * Return value from hook processing functions
+ */
+
+enum EVRPC_HOOK_RESULT {
+ EVRPC_TERMINATE = -1, /**< indicates the rpc should be terminated */
+ EVRPC_CONTINUE = 0, /**< continue processing the rpc */
+ EVRPC_PAUSE = 1 /**< pause processing request until resumed */
+};
+
+/** adds a processing hook to either an rpc base or rpc pool
+ *
+ * If a hook returns TERMINATE, the processing is aborted. On CONTINUE,
+ * the request is immediately processed after the hook returns. If the
+ * hook returns PAUSE, request processing stops until evrpc_resume_request()
+ * has been called.
+ *
+ * The add functions return handles that can be used for removing hooks.
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either INPUT or OUTPUT
+ * @param cb the callback to call when the hook is activated
+ * @param cb_arg an additional argument for the callback
+ * @return a handle to the hook so it can be removed later
+ * @see evrpc_remove_hook()
+ */
+void *evrpc_add_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
+ void *cb_arg);
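+
+/*
+ * Sketch of a pass-through input hook (my_input_hook is a placeholder name;
+ * "base" is an evrpc_base or evrpc_pool pointer):
+ *
+ *	static int
+ *	my_input_hook(void *ctx, struct evhttp_request *req,
+ *	    struct evbuffer *data, void *arg)
+ *	{
+ *		return EVRPC_CONTINUE;
+ *	}
+ *
+ *	void *handle = evrpc_add_hook(base, EVRPC_INPUT, my_input_hook, NULL);
+ */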
+
+/** removes a previously added hook
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either INPUT or OUTPUT
+ * @param handle a handle returned by evrpc_add_hook()
+ * @return 1 on success or 0 on failure
+ * @see evrpc_add_hook()
+ */
+int evrpc_remove_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ void *handle);
+
+/** resume a paused request
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param ctx the context pointer provided to the original hook call
+ */
+int
+evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res);
+
+/** adds meta data to request
+ *
+ * evrpc_hook_add_meta() allows hooks to add meta data to a request. For
+ * a client request, the meta data can be inserted by an outgoing request hook
+ * and retrieved by the incoming request hook.
+ *
+ * @param ctx the context provided to the hook call
+ * @param key a NUL-terminated c-string
+ * @param data the data to be associated with the key
+ * @param data_size the size of the data
+ */
+void evrpc_hook_add_meta(void *ctx, const char *key,
+ const void *data, size_t data_size);
+
+/** retrieves meta data previously associated
+ *
+ * evrpc_hook_find_meta() can be used to retrieve meta data associated with a
+ * request by a previous hook.
+ * @param ctx the context provided to the hook call
+ * @param key a NUL-terminated c-string
+ * @param data pointer to a data pointer that will contain the retrieved data
+ * @param data_size pointer to the size of the data
+ * @return 0 on success or -1 on failure
+ */
+int evrpc_hook_find_meta(void *ctx, const char *key,
+ void **data, size_t *data_size);
+
+/**
+ * returns the connection object associated with the request
+ *
+ * @param ctx the context provided to the hook call
+ * @return a pointer to the evhttp_connection object
+ */
+struct evhttp_connection *evrpc_hook_get_connection(void *ctx);
+
+/**
+ Function for sending a generic RPC request.
+
+ Do not call this function directly, use EVRPC_MAKE_REQUEST() instead.
+
+ @see EVRPC_MAKE_REQUEST()
+ */
+int evrpc_send_request_generic(struct evrpc_pool *pool,
+ void *request, void *reply,
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cb_arg,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer *, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *));
+
+/**
+ Function for registering a generic RPC with the RPC base.
+
+ Do not call this function directly, use EVRPC_REGISTER() instead.
+
+ @see EVRPC_REGISTER()
+ */
+int
+evrpc_register_generic(struct evrpc_base *base, const char *name,
+ void (*callback)(struct evrpc_req_generic *, void *), void *cbarg,
+ void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *),
+ int (*req_unmarshal)(void *, struct evbuffer *),
+ void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *),
+ int (*rpl_complete)(void *),
+ void (*rpl_marshal)(struct evbuffer *, void *));
+
+/** accessors for obscure and undocumented functionality */
+struct evrpc_pool* evrpc_request_get_pool(struct evrpc_request_wrapper *ctx);
+void evrpc_request_set_pool(struct evrpc_request_wrapper *ctx,
+ struct evrpc_pool *pool);
+void evrpc_request_set_cb(struct evrpc_request_wrapper *ctx,
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg),
+ void *cb_arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_RPC_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/rpc_compat.h b/libs/libevent/include/event2/rpc_compat.h
new file mode 100644
index 0000000000..8d8334d25f
--- /dev/null
+++ b/libs/libevent/include/event2/rpc_compat.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_RPC_COMPAT_H_INCLUDED_
+#define EVENT2_RPC_COMPAT_H_INCLUDED_
+
+/** @file event2/rpc_compat.h
+
+ Deprecated versions of the functions in rpc.h: provided only for
+ backwards compatibility.
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** backwards compatible accessors that work only with gcc */
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+
+#undef EVTAG_ASSIGN
+#undef EVTAG_GET
+#undef EVTAG_ADD
+
+#define EVTAG_ASSIGN(msg, member, args...) \
+ (*(msg)->base->member##_assign)(msg, ## args)
+#define EVTAG_GET(msg, member, args...) \
+ (*(msg)->base->member##_get)(msg, ## args)
+#define EVTAG_ADD(msg, member, args...) \
+ (*(msg)->base->member##_add)(msg, ## args)
+#endif
+#define EVTAG_LEN(msg, member) ((msg)->member##_length)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_RPC_COMPAT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/rpc_struct.h b/libs/libevent/include/event2/rpc_struct.h
new file mode 100644
index 0000000000..8f691f49fb
--- /dev/null
+++ b/libs/libevent/include/event2/rpc_struct.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_RPC_STRUCT_H_INCLUDED_
+#define EVENT2_RPC_STRUCT_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file event2/rpc_struct.h
+
+ Structures used by rpc.h. Using these structures directly may harm
+ forward compatibility: be careful!
+
+ */
+
+/**
+ * provides information about the completed RPC request.
+ */
+struct evrpc_status {
+#define EVRPC_STATUS_ERR_NONE 0
+#define EVRPC_STATUS_ERR_TIMEOUT 1
+#define EVRPC_STATUS_ERR_BADPAYLOAD 2
+#define EVRPC_STATUS_ERR_UNSTARTED 3
+#define EVRPC_STATUS_ERR_HOOKABORTED 4
+ int error;
+
+ /* for looking at headers or other information */
+ struct evhttp_request *http_req;
+};
+
+/* the structure below needs to be synchronized with evrpc_req_generic */
+
+/* Encapsulates a request */
+struct evrpc {
+ TAILQ_ENTRY(evrpc) next;
+
+ /* the URI at which the request handler lives */
+ const char* uri;
+
+ /* creates a new request structure */
+ void *(*request_new)(void *);
+ void *request_new_arg;
+
+ /* frees the request structure */
+ void (*request_free)(void *);
+
+ /* unmarshals the buffer into the proper request structure */
+ int (*request_unmarshal)(void *, struct evbuffer *);
+
+ /* creates a new reply structure */
+ void *(*reply_new)(void *);
+ void *reply_new_arg;
+
+ /* frees the reply structure */
+ void (*reply_free)(void *);
+
+ /* verifies that the reply is valid */
+ int (*reply_complete)(void *);
+
+ /* marshals the reply into a buffer */
+ void (*reply_marshal)(struct evbuffer*, void *);
+
+ /* the callback invoked for each received rpc */
+ void (*cb)(struct evrpc_req_generic *, void *);
+ void *cb_arg;
+
+ /* reference for further configuration */
+ struct evrpc_base *base;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_RPC_STRUCT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/tag.h b/libs/libevent/include/event2/tag.h
new file mode 100644
index 0000000000..2f73bfc00e
--- /dev/null
+++ b/libs/libevent/include/event2/tag.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_TAG_H_INCLUDED_
+#define EVENT2_TAG_H_INCLUDED_
+
+/** @file event2/tag.h
+
+ Helper functions for reading and writing tagged data onto buffers.
+
+ */
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+/* For int types. */
+#include <event2/util.h>
+
+struct evbuffer;
+
+/*
+ * Marshaling tagged data - We assume that all tags are inserted in their
+ * numeric order - so that unknown tags will always be higher than the
+ * known ones - and we can just ignore the end of an event buffer.
+ */
+
+EVENT2_EXPORT_SYMBOL
+void evtag_init(void);
+
+/**
+ Unmarshals the header and returns the length of the payload
+
+ @param evbuf the buffer from which to unmarshal data
+ @param ptag a pointer in which the tag id is being stored
+ @returns -1 on failure or the number of bytes in the remaining payload.
+*/
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag);
+
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, const void *data,
+ ev_uint32_t len);
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
+ struct evbuffer *data);
+
+/**
+ Encode an integer and store it in an evbuffer.
+
+ We encode integers by nybbles; the first nibble contains the number
+ of significant nibbles - 1; this allows us to encode up to 64-bit
+ integers. This function is byte-order independent.
+
+ @param evbuf evbuffer to store the encoded number
+ @param number a 32-bit integer
+ */
+EVENT2_EXPORT_SYMBOL
+void evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number);
+EVENT2_EXPORT_SYMBOL
+void evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number);
+
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag,
+ ev_uint32_t integer);
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
+ ev_uint64_t integer);
+
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag,
+ const char *string);
+
+EVENT2_EXPORT_SYMBOL
+void evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag,
+ struct timeval *tv);
+
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag,
+ struct evbuffer *dst);
+EVENT2_EXPORT_SYMBOL
+int evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag);
+EVENT2_EXPORT_SYMBOL
+int evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+EVENT2_EXPORT_SYMBOL
+int evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+EVENT2_EXPORT_SYMBOL
+int evtag_consume(struct evbuffer *evbuf);
+
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint32_t *pinteger);
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint64_t *pinteger);
+
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag,
+ void *data, size_t len);
+
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ char **pstring);
+
+EVENT2_EXPORT_SYMBOL
+int evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ struct timeval *ptv);
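+
+/*
+ * A minimal round-trip sketch: marshal a tagged integer into an evbuffer and
+ * read it back (the tag value 1 is arbitrary):
+ *
+ *	struct evbuffer *buf = evbuffer_new();
+ *	ev_uint32_t value = 0;
+ *	evtag_marshal_int(buf, 1, 42);
+ *	if (evtag_unmarshal_int(buf, 1, &value) != -1)
+ *		printf("decoded %u\n", (unsigned)value);
+ *	evbuffer_free(buf);
+ */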
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_TAG_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/tag_compat.h b/libs/libevent/include/event2/tag_compat.h
new file mode 100644
index 0000000000..a276c0d35b
--- /dev/null
+++ b/libs/libevent/include/event2/tag_compat.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_TAG_COMPAT_H_INCLUDED_
+#define EVENT2_TAG_COMPAT_H_INCLUDED_
+
+/** @file event2/tag_compat.h
+
+ Obsolete/deprecated functions from tag.h; provided only for backwards
+ compatibility.
+ */
+
+/**
+ @name Misnamed functions
+
+ @deprecated These macros are deprecated because their names don't follow
+ Libevent's naming conventions. Use evtag_encode_int and
+ evtag_encode_int64 instead.
+
+ @{
+*/
+#define encode_int(evbuf, number) evtag_encode_int((evbuf), (number))
+#define encode_int64(evbuf, number) evtag_encode_int64((evbuf), (number))
+/**@}*/
+
+#endif /* EVENT2_TAG_COMPAT_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/thread.h b/libs/libevent/include/event2/thread.h
new file mode 100644
index 0000000000..b51998631b
--- /dev/null
+++ b/libs/libevent/include/event2/thread.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_THREAD_H_INCLUDED_
+#define EVENT2_THREAD_H_INCLUDED_
+
+/** @file event2/thread.h
+
+ Functions for multi-threaded applications using Libevent.
+
+ When using a multi-threaded application in which multiple threads
+ add and delete events from a single event base, Libevent needs to
+ lock its data structures.
+
+ Like the memory-management function hooks, all of the threading functions
+ _must_ be set up before an event_base is created if you want the base to
+ use them.
+
+ Most programs will either be using Windows threads or Posix threads. You
+ can configure Libevent to use one of these with evthread_use_windows_threads()
+ or evthread_use_pthreads(), respectively. If you're using another threading
+ library, you'll need to configure threading functions manually using
+ evthread_set_lock_callbacks() and evthread_set_condition_callbacks().
+
+ */
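+
+/*
+ * A minimal setup sketch for a pthreads platform: enable threading support
+ * before the event_base is created (requires linking against
+ * libevent_pthreads).
+ *
+ *	evthread_use_pthreads();
+ *	struct event_base *base = event_base_new();
+ */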
+
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+
+/**
+ @name Flags passed to lock functions
+
+ @{
+*/
+/** A flag passed to a locking callback when the lock was allocated as a
+ * read-write lock, and we want to acquire or release the lock for writing. */
+#define EVTHREAD_WRITE 0x04
+/** A flag passed to a locking callback when the lock was allocated as a
+ * read-write lock, and we want to acquire or release the lock for reading. */
+#define EVTHREAD_READ 0x08
+/** A flag passed to a locking callback when we don't want to block waiting
+ * for the lock; if we can't get the lock immediately, we will instead
+ * return nonzero from the locking callback. */
+#define EVTHREAD_TRY 0x10
+/**@}*/
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) || defined(EVENT_IN_DOXYGEN_)
+
+#define EVTHREAD_LOCK_API_VERSION 1
+
+/**
+ @name Types of locks
+
+ @{*/
+/** A recursive lock is one that can be acquired multiple times at once by the
+ * same thread. No other thread can acquire the lock until the thread that
+ * has been holding it has unlocked it as many times as it locked it. */
+#define EVTHREAD_LOCKTYPE_RECURSIVE 1
+/* A read-write lock is one that allows multiple simultaneous readers, but
+ * where any one writer excludes all other writers and readers. */
+#define EVTHREAD_LOCKTYPE_READWRITE 2
+/**@}*/
+
+/** This structure describes the interface a threading library uses for
+ * locking. It's used to tell evthread_set_lock_callbacks() how to use
+ * locking on this platform.
+ */
+struct evthread_lock_callbacks {
+ /** The current version of the locking API. Set this to
+ * EVTHREAD_LOCK_API_VERSION */
+ int lock_api_version;
+ /** Which kinds of locks does this version of the locking API
+ * support? A bitfield of EVTHREAD_LOCKTYPE_RECURSIVE and
+ * EVTHREAD_LOCKTYPE_READWRITE.
+ *
+ * (Note that RECURSIVE locks are currently mandatory, and
+ * READWRITE locks are not currently used.)
+ **/
+ unsigned supported_locktypes;
+ /** Function to allocate and initialize new lock of type 'locktype'.
+ * Returns NULL on failure. */
+ void *(*alloc)(unsigned locktype);
+	/** Function to release all storage held in 'lock', which was created
+ * with type 'locktype'. */
+ void (*free)(void *lock, unsigned locktype);
+ /** Acquire an already-allocated lock at 'lock' with mode 'mode'.
+ * Returns 0 on success, and nonzero on failure. */
+ int (*lock)(unsigned mode, void *lock);
+ /** Release a lock at 'lock' using mode 'mode'. Returns 0 on success,
+ * and nonzero on failure. */
+ int (*unlock)(unsigned mode, void *lock);
+};
+
+/** Sets a group of functions that Libevent should use for locking.
+ * For full information on the required callback API, see the
+ * documentation for the individual members of evthread_lock_callbacks.
+ *
+ * Note that if you're using Windows or the Pthreads threading library, you
+ * probably shouldn't call this function; instead, use
+ * evthread_use_windows_threads() or evthread_use_pthreads() if you can.
+ */
+EVENT2_EXPORT_SYMBOL
+int evthread_set_lock_callbacks(const struct evthread_lock_callbacks *);
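+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * one way a pthreads-based lock table could be supplied by hand, assuming
+ * <pthread.h> and <stdlib.h> are available.  The sketch_* names are
+ * hypothetical; in practice, prefer evthread_use_pthreads() and let
+ * Libevent do this for you.
+ *
+ *   static void *sketch_lock_alloc(unsigned locktype)
+ *   {
+ *       pthread_mutex_t *m = malloc(sizeof(*m));
+ *       pthread_mutexattr_t attr;
+ *       if (!m)
+ *           return NULL;
+ *       pthread_mutexattr_init(&attr);
+ *       pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ *       pthread_mutex_init(m, &attr);
+ *       pthread_mutexattr_destroy(&attr);
+ *       return m;
+ *   }
+ *   static void sketch_lock_free(void *lock, unsigned locktype)
+ *   {
+ *       pthread_mutex_destroy(lock);
+ *       free(lock);
+ *   }
+ *   static int sketch_lock_lock(unsigned mode, void *lock)
+ *   {
+ *       if (mode & EVTHREAD_TRY)
+ *           return pthread_mutex_trylock(lock);   // nonzero if unavailable
+ *       return pthread_mutex_lock(lock);          // 0 on success
+ *   }
+ *   static int sketch_lock_unlock(unsigned mode, void *lock)
+ *   {
+ *       return pthread_mutex_unlock(lock);
+ *   }
+ *
+ *   static const struct evthread_lock_callbacks sketch_lock_cbs = {
+ *       EVTHREAD_LOCK_API_VERSION,
+ *       EVTHREAD_LOCKTYPE_RECURSIVE,
+ *       sketch_lock_alloc, sketch_lock_free,
+ *       sketch_lock_lock, sketch_lock_unlock,
+ *   };
+ *   ...
+ *   evthread_set_lock_callbacks(&sketch_lock_cbs);
+ *
+ * A complete manual setup would also install an id callback with
+ * evthread_set_id_callback() and condition-variable callbacks with
+ * evthread_set_condition_callbacks(), declared below.
+ */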
+
+#define EVTHREAD_CONDITION_API_VERSION 1
+
+struct timeval;
+
+/** This structure describes the interface a threading library uses for
+ * condition variables. It's used to tell evthread_set_condition_callbacks
+ * how to use locking on this platform.
+ */
+struct evthread_condition_callbacks {
+ /** The current version of the conditions API. Set this to
+ * EVTHREAD_CONDITION_API_VERSION */
+ int condition_api_version;
+ /** Function to allocate and initialize a new condition variable.
+ * Returns the condition variable on success, and NULL on failure.
+ * The 'condtype' argument will be 0 with this API version.
+ */
+ void *(*alloc_condition)(unsigned condtype);
+ /** Function to free a condition variable. */
+ void (*free_condition)(void *cond);
+	/** Function to signal a condition variable. If 'broadcast' is 1, all
+	 * threads waiting on 'cond' should be woken; otherwise, only one
+	 * thread should be woken. Should return 0 on success, -1 on failure.
+ * This function will only be called while holding the associated
+ * lock for the condition.
+ */
+ int (*signal_condition)(void *cond, int broadcast);
+	/** Function to wait for a condition variable. The lock 'lock'
+	 * will be held when this function is called; it should be released
+	 * while waiting for the condition to become signalled, and
+	 * should be held again when this function returns.
+	 * If timeout is provided, it is the interval of time to wait for
+	 * the condition to become signalled; if it is NULL, the function
+	 * should wait indefinitely.
+ *
+ * The function should return -1 on error; 0 if the condition
+ * was signalled, or 1 on a timeout. */
+ int (*wait_condition)(void *cond, void *lock,
+ const struct timeval *timeout);
+};
+
+/** Sets a group of functions that Libevent should use for condition variables.
+ * For full information on the required callback API, see the
+ * documentation for the individual members of evthread_condition_callbacks.
+ *
+ * Note that if you're using Windows or the Pthreads threading library, you
+ * probably shouldn't call this function; instead, use
+ * evthread_use_windows_threads() or evthread_use_pthreads() if you can.
+ */
+EVENT2_EXPORT_SYMBOL
+int evthread_set_condition_callbacks(
+ const struct evthread_condition_callbacks *);
+
+/**
+ Sets the function for determining the thread id.
+
+ @param id_fn the function Libevent should invoke to
+  determine the identity of the calling thread.
+*/
+EVENT2_EXPORT_SYMBOL
+void evthread_set_id_callback(
+ unsigned long (*id_fn)(void));
+
+#if (defined(_WIN32) && !defined(EVENT__DISABLE_THREAD_SUPPORT)) || defined(EVENT_IN_DOXYGEN_)
+/** Sets up Libevent for use with Windows builtin locking and thread ID
+ functions. Unavailable if Libevent is not built for Windows.
+
+ @return 0 on success, -1 on failure. */
+EVENT2_EXPORT_SYMBOL
+int evthread_use_windows_threads(void);
+/**
+ Defined if Libevent was built with support for evthread_use_windows_threads()
+*/
+#define EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED 1
+#endif
+
+#if defined(EVENT__HAVE_PTHREADS) || defined(EVENT_IN_DOXYGEN_)
+/** Sets up Libevent for use with Pthreads locking and thread ID functions.
+ Unavailable if Libevent is not built for use with pthreads. Requires
+ programs to link against the libevent_pthreads library as well as libevent.
+
+ @return 0 on success, -1 on failure. */
+EVENT2_EXPORT_SYMBOL
+int evthread_use_pthreads(void);
+/** Defined if Libevent was built with support for evthread_use_pthreads() */
+#define EVTHREAD_USE_PTHREADS_IMPLEMENTED 1
+
+#endif
+
+/** Enable debugging wrappers around the current lock callbacks. If Libevent
+ * makes one of several common locking errors, exit with an assertion failure.
+ *
+ * If you're going to call this function, you must do so before any locks are
+ * allocated.
+ **/
+EVENT2_EXPORT_SYMBOL
+void evthread_enable_lock_debugging(void);
+
+/* Old (misspelled) version: This is deprecated; use
+ * evthread_enable_lock_debugging() instead. */
+EVENT2_EXPORT_SYMBOL
+void evthread_enable_lock_debuging(void);
+
+#endif /* EVENT__DISABLE_THREAD_SUPPORT */
+
+struct event_base;
+/** Make sure it's safe to tell an event base to wake up from another thread
+ or a signal handler.
+
+ You shouldn't need to call this by hand; configuring the base with thread
+ support should be necessary and sufficient.
+
+ @return 0 on success, -1 on failure.
+ */
+EVENT2_EXPORT_SYMBOL
+int evthread_make_base_notifiable(struct event_base *base);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_THREAD_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/util.h b/libs/libevent/include/event2/util.h
new file mode 100644
index 0000000000..3936786ec9
--- /dev/null
+++ b/libs/libevent/include/event2/util.h
@@ -0,0 +1,852 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_UTIL_H_INCLUDED_
+#define EVENT2_UTIL_H_INCLUDED_
+
+/** @file event2/util.h
+
+ Common convenience functions for cross-platform portability and
+ related socket manipulations.
+
+ */
+#include <event2/visibility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event2/event-config.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(EVENT__HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_STDDEF_H
+#include <stddef.h>
+#endif
+#ifdef _MSC_VER
+#include <BaseTsd.h>
+#endif
+#include <stdarg.h>
+#ifdef EVENT__HAVE_NETDB_H
+#if !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+#include <netdb.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#ifdef EVENT__HAVE_GETADDRINFO
+/* for EAI_* definitions. */
+#include <ws2tcpip.h>
+#endif
+#else
+#include <sys/socket.h>
+#endif
+
+/* Some openbsd autoconf versions get the name of this macro wrong. */
+#if defined(EVENT__SIZEOF_VOID__) && !defined(EVENT__SIZEOF_VOID_P)
+#define EVENT__SIZEOF_VOID_P EVENT__SIZEOF_VOID__
+#endif
+
+/**
+ * @name Standard integer types.
+ *
+ * Integer type definitions for types that are supposed to be defined in the
+ * C99-specified stdint.h. Shamefully, some platforms do not include
+ * stdint.h, so we need to replace it. (If you are on a platform like this,
+ * your C headers are now over 10 years out of date. You should bug them to
+ * do something about this.)
+ *
+ * We define:
+ *
+ * <dl>
+ * <dt>ev_uint64_t, ev_uint32_t, ev_uint16_t, ev_uint8_t</dt>
+ * <dd>unsigned integer types of exactly 64, 32, 16, and 8 bits
+ * respectively.</dd>
+ * <dt>ev_int64_t, ev_int32_t, ev_int16_t, ev_int8_t</dt>
+ * <dd>signed integer types of exactly 64, 32, 16, and 8 bits
+ * respectively.</dd>
+ * <dt>ev_uintptr_t, ev_intptr_t</dt>
+ * <dd>unsigned/signed integers large enough
+ * to hold a pointer without loss of bits.</dd>
+ * <dt>ev_ssize_t</dt>
+ * <dd>A signed type of the same size as size_t</dd>
+ *   <dt>ev_off_t</dt>
+ *     <dd>A signed type typically used to represent offsets within a
+ *     (potentially large) file</dd>
+ * </dl>
+ *
+ * @{
+ */
+#ifdef EVENT__HAVE_UINT64_T
+#define ev_uint64_t uint64_t
+#define ev_int64_t int64_t
+#elif defined(_WIN32)
+#define ev_uint64_t unsigned __int64
+#define ev_int64_t signed __int64
+#elif EVENT__SIZEOF_LONG_LONG == 8
+#define ev_uint64_t unsigned long long
+#define ev_int64_t long long
+#elif EVENT__SIZEOF_LONG == 8
+#define ev_uint64_t unsigned long
+#define ev_int64_t long
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_uint64_t ...
+#define ev_int64_t ...
+#else
+#error "No way to define ev_uint64_t"
+#endif
+
+#ifdef EVENT__HAVE_UINT32_T
+#define ev_uint32_t uint32_t
+#define ev_int32_t int32_t
+#elif defined(_WIN32)
+#define ev_uint32_t unsigned int
+#define ev_int32_t signed int
+#elif EVENT__SIZEOF_LONG == 4
+#define ev_uint32_t unsigned long
+#define ev_int32_t signed long
+#elif EVENT__SIZEOF_INT == 4
+#define ev_uint32_t unsigned int
+#define ev_int32_t signed int
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_uint32_t ...
+#define ev_int32_t ...
+#else
+#error "No way to define ev_uint32_t"
+#endif
+
+#ifdef EVENT__HAVE_UINT16_T
+#define ev_uint16_t uint16_t
+#define ev_int16_t int16_t
+#elif defined(_WIN32)
+#define ev_uint16_t unsigned short
+#define ev_int16_t signed short
+#elif EVENT__SIZEOF_INT == 2
+#define ev_uint16_t unsigned int
+#define ev_int16_t signed int
+#elif EVENT__SIZEOF_SHORT == 2
+#define ev_uint16_t unsigned short
+#define ev_int16_t signed short
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_uint16_t ...
+#define ev_int16_t ...
+#else
+#error "No way to define ev_uint16_t"
+#endif
+
+#ifdef EVENT__HAVE_UINT8_T
+#define ev_uint8_t uint8_t
+#define ev_int8_t int8_t
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_uint8_t ...
+#define ev_int8_t ...
+#else
+#define ev_uint8_t unsigned char
+#define ev_int8_t signed char
+#endif
+
+#ifdef EVENT__HAVE_UINTPTR_T
+#define ev_uintptr_t uintptr_t
+#define ev_intptr_t intptr_t
+#elif EVENT__SIZEOF_VOID_P <= 4
+#define ev_uintptr_t ev_uint32_t
+#define ev_intptr_t ev_int32_t
+#elif EVENT__SIZEOF_VOID_P <= 8
+#define ev_uintptr_t ev_uint64_t
+#define ev_intptr_t ev_int64_t
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_uintptr_t ...
+#define ev_intptr_t ...
+#else
+#error "No way to define ev_uintptr_t"
+#endif
+
+#ifdef EVENT__ssize_t
+#define ev_ssize_t EVENT__ssize_t
+#else
+#define ev_ssize_t ssize_t
+#endif
+
+/* Note that we define ev_off_t based on the compile-time size of off_t that
+ * we used to build Libevent, and not based on the current size of off_t.
+ * (For example, we don't define ev_off_t as off_t.)  We do this because
+ * some systems let you build your software with different off_t sizes,
+ * and so putting in any dependency on off_t would risk an API
+ * mismatch.
+ */
+#ifdef _WIN32
+#define ev_off_t ev_int64_t
+#elif EVENT__SIZEOF_OFF_T == 8
+#define ev_off_t ev_int64_t
+#elif EVENT__SIZEOF_OFF_T == 4
+#define ev_off_t ev_int32_t
+#elif defined(EVENT_IN_DOXYGEN_)
+#define ev_off_t ...
+#else
+#define ev_off_t off_t
+#endif
+/**@}*/
+
+/* Limits for integer types.
+
+ We're making two assumptions here:
+ - The compiler does constant folding properly.
+ - The platform does signed arithmetic in two's complement.
+*/
+
+/**
+ @name Limits for integer types
+
+ These macros hold the largest or smallest values possible for the
+ ev_[u]int*_t types.
+
+ @{
+*/
+#ifndef EVENT__HAVE_STDINT_H
+#define EV_UINT64_MAX ((((ev_uint64_t)0xffffffffUL) << 32) | 0xffffffffUL)
+#define EV_INT64_MAX ((((ev_int64_t) 0x7fffffffL) << 32) | 0xffffffffL)
+#define EV_INT64_MIN ((-EV_INT64_MAX) - 1)
+#define EV_UINT32_MAX ((ev_uint32_t)0xffffffffUL)
+#define EV_INT32_MAX ((ev_int32_t) 0x7fffffffL)
+#define EV_INT32_MIN ((-EV_INT32_MAX) - 1)
+#define EV_UINT16_MAX ((ev_uint16_t)0xffffUL)
+#define EV_INT16_MAX ((ev_int16_t) 0x7fffL)
+#define EV_INT16_MIN ((-EV_INT16_MAX) - 1)
+#define EV_UINT8_MAX 255
+#define EV_INT8_MAX 127
+#define EV_INT8_MIN ((-EV_INT8_MAX) - 1)
+#else
+#define EV_UINT64_MAX UINT64_MAX
+#define EV_INT64_MAX INT64_MAX
+#define EV_INT64_MIN INT64_MIN
+#define EV_UINT32_MAX UINT32_MAX
+#define EV_INT32_MAX INT32_MAX
+#define EV_INT32_MIN INT32_MIN
+#define EV_UINT16_MAX UINT16_MAX
+#define EV_INT16_MAX  INT16_MAX
+#define EV_INT16_MIN  INT16_MIN
+#define EV_UINT8_MAX UINT8_MAX
+#define EV_INT8_MAX INT8_MAX
+#define EV_INT8_MIN INT8_MIN
+#endif
+/** @} */
+
+
+/**
+ @name Limits for SIZE_T and SSIZE_T
+
+ @{
+*/
+#if EVENT__SIZEOF_SIZE_T == 8
+#define EV_SIZE_MAX EV_UINT64_MAX
+#define EV_SSIZE_MAX EV_INT64_MAX
+#elif EVENT__SIZEOF_SIZE_T == 4
+#define EV_SIZE_MAX EV_UINT32_MAX
+#define EV_SSIZE_MAX EV_INT32_MAX
+#elif defined(EVENT_IN_DOXYGEN_)
+#define EV_SIZE_MAX ...
+#define EV_SSIZE_MAX ...
+#else
+#error "No way to define SIZE_MAX"
+#endif
+
+#define EV_SSIZE_MIN ((-EV_SSIZE_MAX) - 1)
+/**@}*/
+
+#ifdef _WIN32
+#define ev_socklen_t int
+#elif defined(EVENT__socklen_t)
+#define ev_socklen_t EVENT__socklen_t
+#else
+#define ev_socklen_t socklen_t
+#endif
+
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY
+#if !defined(EVENT__HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY) \
+ && !defined(ss_family)
+#define ss_family __ss_family
+#endif
+#endif
+
+/**
+ * A type wide enough to hold the output of "socket()" or "accept()". On
+ * Windows, this is an intptr_t; elsewhere, it is an int. */
+#ifdef _WIN32
+#define evutil_socket_t intptr_t
+#else
+#define evutil_socket_t int
+#endif
+
+/**
+ * Structure to hold information about a monotonic timer
+ *
+ * Use this with evutil_configure_monotonic_time() and
+ * evutil_gettime_monotonic().
+ *
+ * This is an opaque structure; you can allocate one using
+ * evutil_monotonic_timer_new().
+ *
+ * @see evutil_monotonic_timer_new(), evutil_monotonic_timer_free(),
+ * evutil_configure_monotonic_time(), evutil_gettime_monotonic()
+ */
+struct evutil_monotonic_timer
+#ifdef EVENT_IN_DOXYGEN_
+{/*Empty body so that doxygen will generate documentation here.*/}
+#endif
+;
+
+#define EV_MONOT_PRECISE 1
+#define EV_MONOT_FALLBACK 2
+
+/** Allocate a new struct evutil_monotonic_timer for use with the
+ * evutil_configure_monotonic_time() and evutil_gettime_monotonic()
+ * functions. You must configure the timer with
+ * evutil_configure_monotonic_time() before using it.
+ */
+EVENT2_EXPORT_SYMBOL
+struct evutil_monotonic_timer * evutil_monotonic_timer_new(void);
+
+/** Free a struct evutil_monotonic_timer that was allocated using
+ * evutil_monotonic_timer_new().
+ */
+EVENT2_EXPORT_SYMBOL
+void evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer);
+
+/** Set up a struct evutil_monotonic_timer; flags can include
+ * EV_MONOT_PRECISE and EV_MONOT_FALLBACK.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
+ int flags);
+
+/** Query the current monotonic time from a struct evutil_monotonic_timer
+ * previously configured with evutil_configure_monotonic_time(). Monotonic
+ * time is guaranteed never to run in reverse, but is not necessarily epoch-
+ * based, or relative to any other definite point. Use it to make reliable
+ * measurements of elapsed time between events even when the system time
+ * may be changed.
+ *
+ * It is not safe to use this function on the same timer from multiple
+ * threads.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
+ struct timeval *tp);
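+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * timing a unit of work with the monotonic timer API declared above.
+ * do_work() stands in for the caller's own code.
+ *
+ *   struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
+ *   struct timeval start, end, elapsed;
+ *
+ *   if (!timer || evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE) < 0)
+ *       abort();                               // no usable monotonic clock
+ *   evutil_gettime_monotonic(timer, &start);
+ *   do_work();
+ *   evutil_gettime_monotonic(timer, &end);
+ *   evutil_timersub(&end, &start, &elapsed);   // see the timeval macros below
+ *   evutil_monotonic_timer_free(timer);
+ */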
+
+/** Create two new sockets that are connected to each other.
+
+ On Unix, this simply calls socketpair(). On Windows, it uses the
+   loopback network interface on 127.0.0.1, and only AF_INET and
+   SOCK_STREAM are supported.
+
+ (This may fail on some Windows hosts where firewall software has cleverly
+ decided to keep 127.0.0.1 from talking to itself.)
+
+   Parameters and return values are as for socketpair().
+*/
+EVENT2_EXPORT_SYMBOL
+int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]);
+/** Do platform-specific operations as needed to make a socket nonblocking.
+
+ @param sock The socket to make nonblocking
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_socket_nonblocking(evutil_socket_t sock);
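+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * a connected, nonblocking socket pair, e.g. for waking one thread from
+ * another.  AF_UNIX/SOCK_STREAM is the conventional request; on Windows the
+ * call transparently uses a loopback TCP connection as described above.
+ *
+ *   evutil_socket_t pair[2];
+ *
+ *   if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ *       abort();
+ *   evutil_make_socket_nonblocking(pair[0]);
+ *   evutil_make_socket_nonblocking(pair[1]);
+ *   ...
+ *   evutil_closesocket(pair[0]);
+ *   evutil_closesocket(pair[1]);
+ */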
+
+/** Do platform-specific operations to make a listener socket reusable.
+
+ Specifically, we want to make sure that another program will be able
+ to bind this address right after we've closed the listener.
+
+ This differs from Windows's interpretation of "reusable", which
+ allows multiple listeners to bind the same address at the same time.
+
+ @param sock The socket to make reusable
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_listen_socket_reuseable(evutil_socket_t sock);
+
+/** Do platform-specific operations to make a listener port reusable.
+
+   Specifically, we want to make sure that multiple programs that also
+   set the same socket option will be able to bind and listen at the
+   same time.
+
+   This feature is available only on Linux 3.9 and later.
+
+ @param sock The socket to make reusable
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_listen_socket_reuseable_port(evutil_socket_t sock);
+
+/** Do platform-specific operations as needed to close a socket upon a
+ successful execution of one of the exec*() functions.
+
+ @param sock The socket to be closed
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_socket_closeonexec(evutil_socket_t sock);
+
+/** Do the platform-specific call needed to close a socket returned from
+ socket() or accept().
+
+ @param sock The socket to be closed
+ @return 0 on success, -1 on failure
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_closesocket(evutil_socket_t sock);
+#define EVUTIL_CLOSESOCKET(s) evutil_closesocket(s)
+
+/** Do platform-specific operations, if possible, to make a tcp listener
+ * socket defer accept()s until there is data to read.
+ *
+ * Not all platforms support this. You don't want to do this for every
+ * listener socket: only the ones that implement a protocol where the
+ * client transmits before the server needs to respond.
+ *
+ * @param sock The listening socket to make deferred
+ * @return 0 on success (whether the operation is supported or not),
+ * -1 on failure
+*/
+EVENT2_EXPORT_SYMBOL
+int evutil_make_tcp_listen_socket_deferred(evutil_socket_t sock);
+
+#ifdef _WIN32
+/** Return the most recent socket error. Not idempotent on all platforms. */
+#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
+/** Replace the most recent socket error with errcode */
+#define EVUTIL_SET_SOCKET_ERROR(errcode) \
+ do { WSASetLastError(errcode); } while (0)
+/** Return the most recent socket error to occur on sock. */
+EVENT2_EXPORT_SYMBOL
+int evutil_socket_geterror(evutil_socket_t sock);
+/** Convert a socket error to a string. */
+EVENT2_EXPORT_SYMBOL
+const char *evutil_socket_error_to_string(int errcode);
+#elif defined(EVENT_IN_DOXYGEN_)
+/**
+ @name Socket error functions
+
+ These functions are needed for making programs compatible between
+ Windows and Unix-like platforms.
+
+ You see, Winsock handles socket errors differently from the rest of
+ the world. Elsewhere, a socket error is like any other error and is
+ stored in errno. But winsock functions require you to retrieve the
+ error with a special function, and don't let you use strerror for
+ the error codes. And handling EWOULDBLOCK is ... different.
+
+ @{
+*/
+/** Return the most recent socket error. Not idempotent on all platforms. */
+#define EVUTIL_SOCKET_ERROR() ...
+/** Replace the most recent socket error with errcode */
+#define EVUTIL_SET_SOCKET_ERROR(errcode) ...
+/** Return the most recent socket error to occur on sock. */
+#define evutil_socket_geterror(sock) ...
+/** Convert a socket error to a string. */
+#define evutil_socket_error_to_string(errcode) ...
+/**@}*/
+#else
+#define EVUTIL_SOCKET_ERROR() (errno)
+#define EVUTIL_SET_SOCKET_ERROR(errcode) \
+ do { errno = (errcode); } while (0)
+#define evutil_socket_geterror(sock) (errno)
+#define evutil_socket_error_to_string(errcode) (strerror(errcode))
+#endif
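+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * portable error reporting after a failed socket call, using the macros
+ * above.  report_error() is a hypothetical logging helper.
+ *
+ *   if (connect(fd, sa, salen) < 0) {
+ *       int err = evutil_socket_geterror(fd);
+ *       report_error("connect: %s", evutil_socket_error_to_string(err));
+ *   }
+ */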
+
+
+/**
+ * @name Manipulation macros for struct timeval.
+ *
+ * We define replacements
+ * for timeradd, timersub, timerclear, timercmp, and timerisset.
+ *
+ * @{
+ */
+#ifdef EVENT__HAVE_TIMERADD
+#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
+#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
+#else
+#define evutil_timeradd(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+ if ((vvp)->tv_usec >= 1000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+#define evutil_timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif /* !EVENT__HAVE_TIMERADD */
+
+#ifdef EVENT__HAVE_TIMERCLEAR
+#define evutil_timerclear(tvp) timerclear(tvp)
+#else
+#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
+#endif
+/**@}*/
+
+/** Return true iff the tvp is related to uvp according to the relational
+ * operator cmp. Recognized values for cmp are ==, <=, <, >=, and >. */
+#define evutil_timercmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+
+#ifdef EVENT__HAVE_TIMERISSET
+#define evutil_timerisset(tvp) timerisset(tvp)
+#else
+#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#endif
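+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * building an absolute deadline with the timeval helpers above and testing
+ * whether it has passed.
+ *
+ *   struct timeval now, interval, deadline;
+ *
+ *   evutil_gettimeofday(&now, NULL);
+ *   interval.tv_sec = 5;
+ *   interval.tv_usec = 0;
+ *   evutil_timeradd(&now, &interval, &deadline);
+ *   ...
+ *   evutil_gettimeofday(&now, NULL);
+ *   if (evutil_timercmp(&now, &deadline, >=))
+ *       ...                        // the five-second deadline has expired
+ */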
+
+/** Replacement for offsetof on platforms that don't define it. */
+#ifdef offsetof
+#define evutil_offsetof(type, field) offsetof(type, field)
+#else
+#define evutil_offsetof(type, field) ((off_t)(&((type *)0)->field))
+#endif
+
+/* big-int related functions */
+/** Parse a 64-bit value from a string. Arguments are as for strtol. */
+EVENT2_EXPORT_SYMBOL
+ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
+
+/** Replacement for gettimeofday on platforms that lack it. */
+#ifdef EVENT__HAVE_GETTIMEOFDAY
+#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz))
+#else
+struct timezone;
+EVENT2_EXPORT_SYMBOL
+int evutil_gettimeofday(struct timeval *tv, struct timezone *tz);
+#endif
+
+/** Replacement for snprintf to get consistent behavior on platforms for
+ which the return value of snprintf does not conform to C99.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+#ifdef __GNUC__
+ __attribute__((format(printf, 3, 4)))
+#endif
+;
+/** Replacement for vsnprintf to get consistent behavior on platforms for
+   which the return value of vsnprintf does not conform to C99.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
+#ifdef __GNUC__
+ __attribute__((format(printf, 3, 0)))
+#endif
+;
+
+/** Replacement for inet_ntop for platforms which lack it. */
+EVENT2_EXPORT_SYMBOL
+const char *evutil_inet_ntop(int af, const void *src, char *dst, size_t len);
+/** Replacement for inet_pton for platforms which lack it. */
+EVENT2_EXPORT_SYMBOL
+int evutil_inet_pton(int af, const char *src, void *dst);
+struct sockaddr;
+
+/** Parse an IPv4 or IPv6 address, with optional port, from a string.
+
+ Recognized formats are:
+ - [IPv6Address]:port
+ - [IPv6Address]
+ - IPv6Address
+ - IPv4Address:port
+ - IPv4Address
+
+ If no port is specified, the port in the output is set to 0.
+
+ @param str The string to parse.
+ @param out A struct sockaddr to hold the result. This should probably be
+ a struct sockaddr_storage.
+  @param outlen A pointer to the number of bytes that 'out' can safely
+ hold. Set to the number of bytes used in 'out' on success.
+ @return -1 if the address is not well-formed, if the port is out of range,
+ or if out is not large enough to hold the result. Otherwise returns
+ 0 on success.
+*/
+EVENT2_EXPORT_SYMBOL
+int evutil_parse_sockaddr_port(const char *str, struct sockaddr *out, int *outlen);
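+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * parsing a listener address given as a string.
+ *
+ *   struct sockaddr_storage ss;
+ *   int len = sizeof(ss);
+ *
+ *   if (evutil_parse_sockaddr_port("127.0.0.1:8080",
+ *           (struct sockaddr *)&ss, &len) == 0) {
+ *       // 'ss' now holds an AF_INET address for port 8080; 'len' is the
+ *       // number of bytes actually used.
+ *   }
+ */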
+
+/** Compare two sockaddrs; return 0 if they are equal, or less than 0 if sa1
+ * precedes sa2, or greater than 0 if sa1 follows sa2. If include_port is
+ * true, consider the port as well as the address. Only implemented for
+ * AF_INET and AF_INET6 addresses. The ordering is not guaranteed to remain
+ * the same between Libevent versions. */
+EVENT2_EXPORT_SYMBOL
+int evutil_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2,
+ int include_port);
+
+/** As strcasecmp, but always compares the characters in locale-independent
+ ASCII. That's useful if you're handling data in ASCII-based protocols.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_ascii_strcasecmp(const char *str1, const char *str2);
+/** As strncasecmp, but always compares the characters in locale-independent
+ ASCII. That's useful if you're handling data in ASCII-based protocols.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_ascii_strncasecmp(const char *str1, const char *str2, size_t n);
+
+/* Here we define evutil_addrinfo to the native addrinfo type, or redefine it
+ * if this system has no getaddrinfo(). */
+#ifdef EVENT__HAVE_STRUCT_ADDRINFO
+#define evutil_addrinfo addrinfo
+#else
+/** A definition of struct addrinfo for systems that lack it.
+
+ (This is just an alias for struct addrinfo if the system defines
+ struct addrinfo.)
+*/
+struct evutil_addrinfo {
+ int ai_flags; /* AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST */
+ int ai_family; /* PF_xxx */
+ int ai_socktype; /* SOCK_xxx */
+ int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */
+ size_t ai_addrlen; /* length of ai_addr */
+ char *ai_canonname; /* canonical name for nodename */
+ struct sockaddr *ai_addr; /* binary address */
+ struct evutil_addrinfo *ai_next; /* next structure in linked list */
+};
+#endif
+/** @name evutil_getaddrinfo() error codes
+
+ These values are possible error codes for evutil_getaddrinfo() and
+ related functions.
+
+ @{
+*/
+#if defined(EAI_ADDRFAMILY) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_ADDRFAMILY EAI_ADDRFAMILY
+#else
+#define EVUTIL_EAI_ADDRFAMILY -901
+#endif
+#if defined(EAI_AGAIN) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_AGAIN EAI_AGAIN
+#else
+#define EVUTIL_EAI_AGAIN -902
+#endif
+#if defined(EAI_BADFLAGS) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_BADFLAGS EAI_BADFLAGS
+#else
+#define EVUTIL_EAI_BADFLAGS -903
+#endif
+#if defined(EAI_FAIL) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_FAIL EAI_FAIL
+#else
+#define EVUTIL_EAI_FAIL -904
+#endif
+#if defined(EAI_FAMILY) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_FAMILY EAI_FAMILY
+#else
+#define EVUTIL_EAI_FAMILY -905
+#endif
+#if defined(EAI_MEMORY) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_MEMORY EAI_MEMORY
+#else
+#define EVUTIL_EAI_MEMORY -906
+#endif
+/* This test is a bit complicated, since some MS SDKs decide to
+ * remove NODATA or redefine it to be the same as NONAME, in a
+ * fun interpretation of RFC 2553 and RFC 3493. */
+#if defined(EAI_NODATA) && defined(EVENT__HAVE_GETADDRINFO) && (!defined(EAI_NONAME) || EAI_NODATA != EAI_NONAME)
+#define EVUTIL_EAI_NODATA EAI_NODATA
+#else
+#define EVUTIL_EAI_NODATA -907
+#endif
+#if defined(EAI_NONAME) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_NONAME EAI_NONAME
+#else
+#define EVUTIL_EAI_NONAME -908
+#endif
+#if defined(EAI_SERVICE) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_SERVICE EAI_SERVICE
+#else
+#define EVUTIL_EAI_SERVICE -909
+#endif
+#if defined(EAI_SOCKTYPE) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_SOCKTYPE EAI_SOCKTYPE
+#else
+#define EVUTIL_EAI_SOCKTYPE -910
+#endif
+#if defined(EAI_SYSTEM) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_EAI_SYSTEM EAI_SYSTEM
+#else
+#define EVUTIL_EAI_SYSTEM -911
+#endif
+
+#define EVUTIL_EAI_CANCEL -90001
+
+#if defined(AI_PASSIVE) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_PASSIVE AI_PASSIVE
+#else
+#define EVUTIL_AI_PASSIVE 0x1000
+#endif
+#if defined(AI_CANONNAME) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_CANONNAME AI_CANONNAME
+#else
+#define EVUTIL_AI_CANONNAME 0x2000
+#endif
+#if defined(AI_NUMERICHOST) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_NUMERICHOST AI_NUMERICHOST
+#else
+#define EVUTIL_AI_NUMERICHOST 0x4000
+#endif
+#if defined(AI_NUMERICSERV) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_NUMERICSERV AI_NUMERICSERV
+#else
+#define EVUTIL_AI_NUMERICSERV 0x8000
+#endif
+#if defined(AI_V4MAPPED) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_V4MAPPED AI_V4MAPPED
+#else
+#define EVUTIL_AI_V4MAPPED 0x10000
+#endif
+#if defined(AI_ALL) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_ALL AI_ALL
+#else
+#define EVUTIL_AI_ALL 0x20000
+#endif
+#if defined(AI_ADDRCONFIG) && defined(EVENT__HAVE_GETADDRINFO)
+#define EVUTIL_AI_ADDRCONFIG AI_ADDRCONFIG
+#else
+#define EVUTIL_AI_ADDRCONFIG 0x40000
+#endif
+/**@}*/
+
+struct evutil_addrinfo;
+/**
+ * This function clones getaddrinfo for systems that don't have it. For full
+ * details, see RFC 3493, section 6.1.
+ *
+ * Limitations:
+ * - When the system has no getaddrinfo, we fall back to gethostbyname_r or
+ * gethostbyname, with their attendant issues.
+ * - The AI_V4MAPPED and AI_ALL flags are not currently implemented.
+ *
+ * For a nonblocking variant, see evdns_getaddrinfo.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_getaddrinfo(const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res);
+
+/** Release storage allocated by evutil_getaddrinfo or evdns_getaddrinfo. */
+EVENT2_EXPORT_SYMBOL
+void evutil_freeaddrinfo(struct evutil_addrinfo *ai);
+
+EVENT2_EXPORT_SYMBOL
+const char *evutil_gai_strerror(int err);
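+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * a blocking lookup with the portable getaddrinfo clone declared above.
+ * The host and port are placeholders.
+ *
+ *   struct evutil_addrinfo hints, *res = NULL, *ai;
+ *   int err;
+ *
+ *   memset(&hints, 0, sizeof(hints));
+ *   hints.ai_family = AF_UNSPEC;
+ *   hints.ai_socktype = SOCK_STREAM;
+ *   err = evutil_getaddrinfo("www.example.com", "80", &hints, &res);
+ *   if (err != 0) {
+ *       // report evutil_gai_strerror(err)
+ *   } else {
+ *       for (ai = res; ai; ai = ai->ai_next) {
+ *           // try socket()/connect() with ai->ai_family, ai->ai_socktype,
+ *           // ai->ai_protocol and ai->ai_addr/ai->ai_addrlen
+ *       }
+ *       evutil_freeaddrinfo(res);
+ *   }
+ */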
+
+/** Generate n bytes of secure pseudorandom data, and store them in buf.
+ *
+ * Current versions of Libevent use an ARC4-based random number generator,
+ * seeded using the platform's entropy source (/dev/urandom on Unix-like
+ * systems; CryptGenRandom on Windows). This is not actually as secure as it
+ * should be: ARC4 is a pretty lousy cipher, and the current implementation
+ * provides only rudimentary prediction- and backtracking-resistance. Don't
+ * use this for serious cryptographic applications.
+ */
+EVENT2_EXPORT_SYMBOL
+void evutil_secure_rng_get_bytes(void *buf, size_t n);
+
+/**
+ * Seed the secure random number generator if needed, and return 0 on
+ * success or -1 on failure.
+ *
+ * It is okay to call this function more than once; it will still return
+ * 0 if the RNG has been successfully seeded and -1 if it can't be
+ * seeded.
+ *
+ * Ordinarily you don't need to call this function from your own code;
+ * Libevent will seed the RNG itself the first time it needs good random
+ * numbers. You only need to call it if (a) you want to double-check
+ * that one of the seeding methods did succeed, or (b) you plan to drop
+ * the capability to seed (by chrooting, or dropping capabilities, or
+ * whatever), and you want to make sure that seeding happens before your
+ * program loses the ability to do it.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_secure_rng_init(void);
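+
+/* Illustrative sketch (editor's addition, not part of the upstream header):
+ * generating a random cookie, seeding explicitly first so that a later
+ * chroot() or privilege drop cannot prevent seeding.
+ *
+ *   unsigned char cookie[16];
+ *
+ *   if (evutil_secure_rng_init() < 0)
+ *       abort();                // no entropy source could be opened
+ *   evutil_secure_rng_get_bytes(cookie, sizeof(cookie));
+ */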
+
+/**
+ * Set a filename to use in place of /dev/urandom for seeding the secure
+ * PRNG. Return 0 on success, -1 on failure.
+ *
+ * Call this function BEFORE calling any other initialization or RNG
+ * functions.
+ *
+ * (This string will _NOT_ be copied internally. Do not free it while any
+ * user of the secure RNG might be running. Don't pass anything other than a
+ * real /dev/...random device file here, or you might lose security.)
+ *
+ * This API is unstable, and might change in a future libevent version.
+ */
+EVENT2_EXPORT_SYMBOL
+int evutil_secure_rng_set_urandom_device_file(char *fname);
+
+/** Seed the random number generator with extra random bytes.
+
+ You should almost never need to call this function; it should be
+ sufficient to invoke evutil_secure_rng_init(), or let Libevent take
+ care of calling evutil_secure_rng_init() on its own.
+
+ If you call this function as a _replacement_ for the regular
+ entropy sources, then you need to be sure that your input
+ contains a fairly large amount of strong entropy. Doing so is
+ notoriously hard: most people who try get it wrong. Watch out!
+
+ @param dat a buffer full of a strong source of random numbers
+   @param datlen the number of bytes to read from dat
+ */
+EVENT2_EXPORT_SYMBOL
+void evutil_secure_rng_add_bytes(const char *dat, size_t datlen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT2_UTIL_H_INCLUDED_ */
diff --git a/libs/libevent/include/event2/visibility.h b/libs/libevent/include/event2/visibility.h
new file mode 100644
index 0000000000..fb16dbeedd
--- /dev/null
+++ b/libs/libevent/include/event2/visibility.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT2_VISIBILITY_H_INCLUDED_
+#define EVENT2_VISIBILITY_H_INCLUDED_
+
+#include <event2/event-config.h>
+
+#if defined(event_EXPORTS) || defined(event_extra_EXPORTS) || defined(event_core_EXPORTS)
+# if defined (__SUNPRO_C) && (__SUNPRO_C >= 0x550)
+# define EVENT2_EXPORT_SYMBOL __global
+# elif defined __GNUC__
+# define EVENT2_EXPORT_SYMBOL __attribute__ ((visibility("default")))
+# elif defined(_MSC_VER)
+# define EVENT2_EXPORT_SYMBOL extern __declspec(dllexport)
+# else
+# define EVENT2_EXPORT_SYMBOL /* unknown compiler */
+# endif
+#else
+# if defined(EVENT__NEED_DLLIMPORT) && defined(_MSC_VER) && !defined(EVENT_BUILDING_REGRESS_TEST)
+# define EVENT2_EXPORT_SYMBOL extern __declspec(dllimport)
+# else
+# define EVENT2_EXPORT_SYMBOL
+# endif
+#endif
+
+#endif /* EVENT2_VISIBILITY_H_INCLUDED_ */
diff --git a/libs/libevent/include/evhttp.h b/libs/libevent/include/evhttp.h
new file mode 100644
index 0000000000..549bc9b14a
--- /dev/null
+++ b/libs/libevent/include/evhttp.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT1_EVHTTP_H_INCLUDED_
+#define EVENT1_EVHTTP_H_INCLUDED_
+
+/** @file evhttp.h
+
+ An http implementation subsystem for Libevent.
+
+ The <evhttp.h> header is deprecated in Libevent 2.0 and later; please
+ use <event2/http.h> instead. Depending on what functionality you
+ need, you may also want to include more of the other <event2/...>
+ headers.
+ */
+
+#include <event.h>
+#include <event2/http.h>
+#include <event2/http_struct.h>
+#include <event2/http_compat.h>
+
+#endif /* EVENT1_EVHTTP_H_INCLUDED_ */
diff --git a/libs/libevent/include/evrpc.h b/libs/libevent/include/evrpc.h
new file mode 100644
index 0000000000..7e986f7dab
--- /dev/null
+++ b/libs/libevent/include/evrpc.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT1_EVRPC_H_INCLUDED_
+#define EVENT1_EVRPC_H_INCLUDED_
+
+/** @file evrpc.h
+
+ An RPC system for Libevent.
+
+ The <evrpc.h> header is deprecated in Libevent 2.0 and later; please
+ use <event2/rpc.h> instead. Depending on what functionality you
+ need, you may also want to include more of the other <event2/...>
+ headers.
+ */
+
+#include <event.h>
+#include <event2/rpc.h>
+#include <event2/rpc_struct.h>
+#include <event2/rpc_compat.h>
+
+#endif /* EVENT1_EVRPC_H_INCLUDED_ */
diff --git a/libs/libevent/include/evutil.h b/libs/libevent/include/evutil.h
new file mode 100644
index 0000000000..12c137d745
--- /dev/null
+++ b/libs/libevent/include/evutil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT1_EVUTIL_H_INCLUDED_
+#define EVENT1_EVUTIL_H_INCLUDED_
+
+/** @file evutil.h
+
+ Utility and compatibility functions for Libevent.
+
+ The <evutil.h> header is deprecated in Libevent 2.0 and later; please
+ use <event2/util.h> instead.
+*/
+
+#include <event2/util.h>
+
+#endif /* EVENT1_EVUTIL_H_INCLUDED_ */
diff --git a/libs/libevent/include/include.am b/libs/libevent/include/include.am
new file mode 100644
index 0000000000..9aad2dba4a
--- /dev/null
+++ b/libs/libevent/include/include.am
@@ -0,0 +1,46 @@
+# include/Makefile.am for libevent
+# Copyright 2000-2007 Niels Provos
+# Copyright 2007-2012 Niels Provos and Nick Mathewson
+#
+# See LICENSE for copying information.
+
+include_event2dir = $(includedir)/event2
+
+EVENT2_EXPORT = \
+ include/event2/buffer.h \
+ include/event2/buffer_compat.h \
+ include/event2/bufferevent.h \
+ include/event2/bufferevent_compat.h \
+ include/event2/bufferevent_ssl.h \
+ include/event2/bufferevent_struct.h \
+ include/event2/dns.h \
+ include/event2/dns_compat.h \
+ include/event2/dns_struct.h \
+ include/event2/event.h \
+ include/event2/event_compat.h \
+ include/event2/event_struct.h \
+ include/event2/http.h \
+ include/event2/http_compat.h \
+ include/event2/http_struct.h \
+ include/event2/keyvalq_struct.h \
+ include/event2/listener.h \
+ include/event2/rpc.h \
+ include/event2/rpc_compat.h \
+ include/event2/rpc_struct.h \
+ include/event2/tag.h \
+ include/event2/tag_compat.h \
+ include/event2/thread.h \
+ include/event2/util.h \
+ include/event2/visibility.h
+
+## Without the nobase_ prefixing, Automake would strip "include/event2/" from
+## the source header filename to derive the installed header filename.
+## With nobase_ the installed path is $(includedir)/include/event2/ev*.h.
+
+if INSTALL_LIBEVENT
+include_event2_HEADERS = $(EVENT2_EXPORT)
+nodist_include_event2_HEADERS = include/event2/event-config.h
+else
+noinst_HEADERS += $(EVENT2_EXPORT)
+nodist_noinst_HEADERS = include/event2/event-config.h
+endif
diff --git a/libs/libevent/libevent_12.vcxproj b/libs/libevent/libevent_12.vcxproj
new file mode 100644
index 0000000000..11876a5aa9
--- /dev/null
+++ b/libs/libevent/libevent_12.vcxproj
@@ -0,0 +1,212 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGUID>{46D53888-E60E-32A7-91EB-6336DE0D84D8}</ProjectGUID>
+ <ProjectName>libevent</ProjectName>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <PlatformToolset>v120_xp</PlatformToolset>
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <PlatformToolset>v120_xp</PlatformToolset>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <PlatformToolset>v120_xp</PlatformToolset>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ <PlatformToolset>v120_xp</PlatformToolset>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.40219.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)\$(Configuration)\Libs\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)\$(Configuration)64\Libs\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)\$(Configuration)\Obj\$(ProjectName)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)\$(Configuration)64\Obj\$(ProjectName)\</IntDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)\$(Configuration)\Libs\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)\$(Configuration)64\Libs\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)\$(Configuration)\Obj\$(ProjectName)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)\$(Configuration)64\Obj\$(ProjectName)\</IntDir>
+ <IgnoreImportLibrary>true</IgnoreImportLibrary>
+ </PropertyGroup>
+ <PropertyGroup>
+ <TargetExt>.mir</TargetExt>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <AdditionalIncludeDirectories>include;src\compat;..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <Optimization>Disabled</Optimization>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>_WINDOWS;_DEBUG;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\win32</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Windows</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <AdditionalIncludeDirectories>include;src\compat;..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <Optimization>Full</Optimization>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>_WINDOWS;NDEBUG;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\win32</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Windows</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <AdditionalIncludeDirectories>include;src\compat;..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <Optimization>Disabled</Optimization>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>_WINDOWS;_DEBUG;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\win64</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Windows</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <AdditionalIncludeDirectories>include;src\compat;..\..\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <Optimization>Full</Optimization>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>_WINDOWS;NDEBUG;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;libeay32.lib;ssleay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>..\win64</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Windows</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="src\bufferevent-internal.h" />
+ <ClInclude Include="src\changelist-internal.h" />
+ <ClInclude Include="src\defer-internal.h" />
+ <ClInclude Include="src\epolltable-internal.h" />
+ <ClInclude Include="src\evbuffer-internal.h" />
+ <ClInclude Include="src\event-internal.h" />
+ <ClInclude Include="src\evmap-internal.h" />
+ <ClInclude Include="src\evrpc-internal.h" />
+ <ClInclude Include="src\evsignal-internal.h" />
+ <ClInclude Include="src\evthread-internal.h" />
+ <ClInclude Include="src\ht-internal.h" />
+ <ClInclude Include="src\http-internal.h" />
+ <ClInclude Include="src\iocp-internal.h" />
+ <ClInclude Include="src\ipv6-internal.h" />
+ <ClInclude Include="src\log-internal.h" />
+ <ClInclude Include="src\minheap-internal.h" />
+ <ClInclude Include="src\mm-internal.h" />
+ <ClInclude Include="src\ratelim-internal.h" />
+ <ClInclude Include="src\strlcpy-internal.h" />
+ <ClInclude Include="src\util-internal.h" />
+ <ClInclude Include="src\compat\sys\queue.h" />
+ <ClInclude Include="src\WIN32-Code\getopt.h" />
+ <ClInclude Include="include\evdns.h" />
+ <ClInclude Include="include\evrpc.h" />
+ <ClInclude Include="include\event.h" />
+ <ClInclude Include="include\evhttp.h" />
+ <ClInclude Include="include\evutil.h" />
+ <ClInclude Include="include\event2\buffer.h" />
+ <ClInclude Include="include\event2\bufferevent.h" />
+ <ClInclude Include="include\event2\bufferevent_compat.h" />
+ <ClInclude Include="include\event2\bufferevent_struct.h" />
+ <ClInclude Include="include\event2\buffer_compat.h" />
+ <ClInclude Include="include\event2\dns.h" />
+ <ClInclude Include="include\event2\dns_compat.h" />
+ <ClInclude Include="include\event2\dns_struct.h" />
+ <ClInclude Include="include\event2\event.h" />
+ <ClInclude Include="include\event2\event_compat.h" />
+ <ClInclude Include="include\event2\event_struct.h" />
+ <ClInclude Include="include\event2\http.h" />
+ <ClInclude Include="include\event2\http_compat.h" />
+ <ClInclude Include="include\event2\http_struct.h" />
+ <ClInclude Include="include\event2\keyvalq_struct.h" />
+ <ClInclude Include="include\event2\listener.h" />
+ <ClInclude Include="include\event2\rpc.h" />
+ <ClInclude Include="include\event2\rpc_compat.h" />
+ <ClInclude Include="include\event2\rpc_struct.h" />
+ <ClInclude Include="include\event2\tag.h" />
+ <ClInclude Include="include\event2\tag_compat.h" />
+ <ClInclude Include="include\event2\thread.h" />
+ <ClInclude Include="include\event2\util.h" />
+ <ClInclude Include="include\event2\visibility.h" />
+ <ClInclude Include="include\event2\event-config.h" />
+ <ClInclude Include="include\event2\bufferevent_ssl.h" />
+ <ClCompile Include="src\buffer.c" />
+ <ClCompile Include="src\bufferevent.c" />
+ <ClCompile Include="src\bufferevent_filter.c" />
+ <ClCompile Include="src\bufferevent_pair.c" />
+ <ClCompile Include="src\bufferevent_ratelim.c" />
+ <ClCompile Include="src\bufferevent_sock.c" />
+ <ClCompile Include="src\event.c" />
+ <ClCompile Include="src\evmap.c" />
+ <ClCompile Include="src\evthread.c" />
+ <ClCompile Include="src\evutil.c" />
+ <ClCompile Include="src\evutil_rand.c" />
+ <ClCompile Include="src\evutil_time.c" />
+ <ClCompile Include="src\listener.c" />
+ <ClCompile Include="src\log.c" />
+ <ClCompile Include="src\signal.c" />
+ <ClCompile Include="src\strlcpy.c" />
+ <ClCompile Include="src\bufferevent_openssl.c" />
+ <ClCompile Include="src\evthread_win32.c" />
+ <ClCompile Include="src\buffer_iocp.c" />
+ <ClCompile Include="src\bufferevent_async.c" />
+ <ClCompile Include="src\event_iocp.c" />
+ <ClCompile Include="src\win32select.c" />
+ <ClCompile Include="src\event_tagging.c" />
+ <ClCompile Include="src\http.c" />
+ <ClCompile Include="src\evdns.c" />
+ <ClCompile Include="src\evrpc.c" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/libs/libevent/libevent_12.vcxproj.filters b/libs/libevent/libevent_12.vcxproj.filters
new file mode 100644
index 0000000000..52a3297f3a
--- /dev/null
+++ b/libs/libevent/libevent_12.vcxproj.filters
@@ -0,0 +1,264 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Project ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="src\buffer.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_filter.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_pair.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_ratelim.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_sock.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\event.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evmap.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evthread.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evutil.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evutil_rand.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evutil_time.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\listener.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\log.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\signal.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\strlcpy.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_openssl.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evthread_win32.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\buffer_iocp.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\bufferevent_async.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\event_iocp.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\win32select.c">
+ <Filter>Source Core</Filter>
+ </ClCompile>
+ <ClCompile Include="src\event_tagging.c">
+ <Filter>Source Extra</Filter>
+ </ClCompile>
+ <ClCompile Include="src\http.c">
+ <Filter>Source Extra</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evdns.c">
+ <Filter>Source Extra</Filter>
+ </ClCompile>
+ <ClCompile Include="src\evrpc.c">
+ <Filter>Source Extra</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="src\bufferevent-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\changelist-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\defer-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\epolltable-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\evbuffer-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\event-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\evmap-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\evrpc-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\evsignal-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\evthread-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\ht-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\http-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\iocp-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\ipv6-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\log-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\minheap-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\mm-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\ratelim-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\strlcpy-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\util-internal.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\compat\sys\queue.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="src\WIN32-Code\getopt.h">
+ <Filter>Headers Private</Filter>
+ </ClInclude>
+ <ClInclude Include="include\evdns.h">
+ <Filter>Header Compat</Filter>
+ </ClInclude>
+ <ClInclude Include="include\evrpc.h">
+ <Filter>Header Compat</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event.h">
+ <Filter>Header Compat</Filter>
+ </ClInclude>
+ <ClInclude Include="include\evhttp.h">
+ <Filter>Header Compat</Filter>
+ </ClInclude>
+ <ClInclude Include="include\evutil.h">
+ <Filter>Header Compat</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\buffer.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\bufferevent.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\bufferevent_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\bufferevent_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\buffer_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\dns.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\dns_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\dns_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\event.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\event_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\event_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\http.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\http_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\http_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\keyvalq_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\listener.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\rpc.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\rpc_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\rpc_struct.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\tag.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\tag_compat.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\thread.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\util.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\visibility.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\event-config.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ <ClInclude Include="include\event2\bufferevent_ssl.h">
+ <Filter>Headers Public</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{918036C9-880E-383E-A0A0-E2F1133C4BF0}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Headers Private">
+ <UniqueIdentifier>{403FD3F2-243C-3501-846D-EC6DB43B0865}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Header Compat">
+ <UniqueIdentifier>{6259BD7B-787D-3652-9A11-0698644E1128}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Headers Public">
+ <UniqueIdentifier>{D8BDFFA3-4BC2-3E42-BC8E-450D2147C8CA}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Source Core">
+ <UniqueIdentifier>{70C5E5A7-1F69-39E0-8BF9-5F35EFB07481}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Source Extra">
+ <UniqueIdentifier>{D7733A27-70AE-358A-911E-F7687E9FECFF}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+</Project>
diff --git a/libs/libevent/src/WIN32-Code/getopt.c b/libs/libevent/src/WIN32-Code/getopt.c
new file mode 100644
index 0000000000..0fcba5d915
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt.c
@@ -0,0 +1,149 @@
+/* $NetBSD: getopt.c,v 1.16 1999/12/02 13:15:56 kleink Exp $ */
+
+/*
+ * Copyright (c) 1987, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
+ * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+static char sccsid[] = "@(#)getopt.c 8.3 (Berkeley) 4/27/95";
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#define __P(x) x
+#define _DIAGASSERT(x) assert(x)
+
+#ifdef __weak_alias
+__weak_alias(getopt,_getopt);
+#endif
+
+
+int opterr = 1, /* if error message should be printed */
+ optind = 1, /* index into parent argv vector */
+ optopt, /* character checked for validity */
+ optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+
+static char * _progname __P((char *));
+int getopt_internal __P((int, char * const *, const char *));
+
+static char *
+_progname(nargv0)
+ char * nargv0;
+{
+ char * tmp;
+
+ _DIAGASSERT(nargv0 != NULL);
+
+ tmp = strrchr(nargv0, '/');
+ if (tmp)
+ tmp++;
+ else
+ tmp = nargv0;
+ return(tmp);
+}
+
+#define BADCH (int)'?'
+#define BADARG (int)':'
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt(nargc, nargv, ostr)
+ int nargc;
+ char * const nargv[];
+ const char *ostr;
+{
+ static char *__progname = 0;
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+ __progname = __progname?__progname:_progname(*nargv);
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(ostr != NULL);
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (-1);
+ }
+ if (place[1] && *++place == '-' /* found "--" */
+ && place[1] == '\0') {
+ ++optind;
+ place = EMSG;
+ return (-1);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means -1.
+ */
+ if (optopt == (int)'-')
+ return (-1);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", __progname, optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ }
+ else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if (*ostr == ':')
+ return (BADARG);
+ if (opterr)
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ __progname, optopt);
+ return (BADCH);
+ }
+ else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
+
diff --git a/libs/libevent/src/WIN32-Code/getopt.h b/libs/libevent/src/WIN32-Code/getopt.h
new file mode 100644
index 0000000000..796f455050
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt.h
@@ -0,0 +1,33 @@
+#ifndef __GETOPT_H__
+#define __GETOPT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int opterr; /* if error message should be printed */
+extern int optind; /* index into parent argv vector */
+extern int optopt; /* character checked for validity */
+extern int optreset; /* reset getopt */
+extern char *optarg; /* argument associated with option */
+
+struct option
+{
+ const char *name;
+ int has_arg;
+ int *flag;
+ int val;
+};
+
+#define no_argument 0
+#define required_argument 1
+#define optional_argument 2
+
+int getopt(int, char**, const char*);
+int getopt_long(int, char**, const char*, const struct option*, int*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __GETOPT_H__ */
diff --git a/libs/libevent/src/WIN32-Code/getopt_long.c b/libs/libevent/src/WIN32-Code/getopt_long.c
new file mode 100644
index 0000000000..03f0c01a15
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt_long.c
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 1987, 1993, 1994, 1996
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
+ * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "getopt.h"
+
+extern int opterr; /* if error message should be printed */
+extern int optind; /* index into parent argv vector */
+extern int optopt; /* character checked for validity */
+extern int optreset; /* reset getopt */
+extern char *optarg; /* argument associated with option */
+
+#define __P(x) x
+#define _DIAGASSERT(x) assert(x)
+
+static char * __progname __P((char *));
+int getopt_internal __P((int, char * const *, const char *));
+
+static char *
+__progname(nargv0)
+ char * nargv0;
+{
+ char * tmp;
+
+ _DIAGASSERT(nargv0 != NULL);
+
+ tmp = strrchr(nargv0, '/');
+ if (tmp)
+ tmp++;
+ else
+ tmp = nargv0;
+ return(tmp);
+}
+
+#define BADCH (int)'?'
+#define BADARG (int)':'
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt_internal(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(ostr != NULL);
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (-1);
+ }
+ if (place[1] && *++place == '-') { /* found "--" */
+ /* ++optind; */
+ place = EMSG;
+ return (-2);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means -1.
+ */
+ if (optopt == (int)'-')
+ return (-1);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", __progname(nargv[0]), optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ } else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if ((opterr) && (*ostr != ':'))
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ __progname(nargv[0]), optopt);
+ return (BADARG);
+ } else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
+
+#if 0
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt2(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ int retval;
+
+ if ((retval = getopt_internal(nargc, nargv, ostr)) == -2) {
+ retval = -1;
+ ++optind;
+ }
+ return(retval);
+}
+#endif
+
+/*
+ * getopt_long --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt_long(nargc, nargv, options, long_options, index)
+ int nargc;
+ char ** nargv;
+ const char * options;
+ const struct option * long_options;
+ int * index;
+{
+ int retval;
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(options != NULL);
+ _DIAGASSERT(long_options != NULL);
+ /* index may be NULL */
+
+ if ((retval = getopt_internal(nargc, nargv, options)) == -2) {
+ char *current_argv = nargv[optind++] + 2, *has_equal;
+ int i, current_argv_len, match = -1;
+
+ if (*current_argv == '\0') {
+ return(-1);
+ }
+ if ((has_equal = strchr(current_argv, '=')) != NULL) {
+ current_argv_len = has_equal - current_argv;
+ has_equal++;
+ } else
+ current_argv_len = strlen(current_argv);
+
+ for (i = 0; long_options[i].name; i++) {
+ if (strncmp(current_argv, long_options[i].name, current_argv_len))
+ continue;
+
+ if (strlen(long_options[i].name) == (unsigned)current_argv_len) {
+ match = i;
+ break;
+ }
+ if (match == -1)
+ match = i;
+ }
+ if (match != -1) {
+ if (long_options[match].has_arg == required_argument ||
+ long_options[match].has_arg == optional_argument) {
+ if (has_equal)
+ optarg = has_equal;
+ else
+ optarg = nargv[optind++];
+ }
+ if ((long_options[match].has_arg == required_argument)
+ && (optarg == NULL)) {
+ /*
+ * Missing argument, leading :
+ * indicates no error should be generated
+ */
+ if ((opterr) && (*options != ':'))
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %s\n",
+ __progname(nargv[0]), current_argv);
+ return (BADARG);
+ }
+ } else { /* No matching argument */
+ if ((opterr) && (*options != ':'))
+ (void)fprintf(stderr,
+ "%s: illegal option -- %s\n", __progname(nargv[0]), current_argv);
+ return (BADCH);
+ }
+ if (long_options[match].flag) {
+ *long_options[match].flag = long_options[match].val;
+ retval = 0;
+ } else
+ retval = long_options[match].val;
+ if (index)
+ *index = match;
+ }
+ return(retval);
+}
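
The two files above give Windows builds a BSD-licensed replacement for getopt()/getopt_long(), declared in src/WIN32-Code/getopt.h. A minimal sketch of how a caller would drive that interface follows; the -f/--file and -v/--verbose options and the driver program are invented for illustration and are not part of this diff.

/* Illustrative only: exercises the getopt_long() replacement added above.
 * The option names are made up for this sketch. */
#include <stdio.h>
#include "getopt.h"

int
main(int argc, char **argv)
{
	static const struct option longopts[] = {
		{ "file",    required_argument, NULL, 'f' },
		{ "verbose", no_argument,       NULL, 'v' },
		{ NULL,      0,                 NULL, 0 }
	};
	const char *file = NULL;
	int verbose = 0, ch;

	while ((ch = getopt_long(argc, argv, "f:v", longopts, NULL)) != -1) {
		switch (ch) {
		case 'f': file = optarg;  break;
		case 'v': verbose = 1;    break;
		default:  return 1;       /* getopt already printed a message */
		}
	}
	printf("file=%s verbose=%d trailing=%d\n",
	    file ? file : "(none)", verbose, argc - optind);
	return 0;
}
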
diff --git a/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h b/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h
new file mode 100644
index 0000000000..88e206272b
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h
@@ -0,0 +1,6 @@
+#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__)
+#define EVENT_EVCONFIG__PRIVATE_H_
+
+/* Nothing to see here. Move along. */
+
+#endif
diff --git a/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h b/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h
new file mode 100644
index 0000000000..8cbf190289
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h
@@ -0,0 +1,360 @@
+/* event2/event-config.h
+ *
+ * This file was generated by autoconf when libevent was built, and post-
+ * processed by Libevent so that its macros would have a uniform prefix.
+ *
+ * DO NOT EDIT THIS FILE.
+ *
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef EVENT_CONFIG_H__
+#define EVENT_CONFIG_H__
+/* config.h. Generated by configure. */
+/* config.h.in. Generated from configure.in by autoheader. */
+
+/* Define if libevent should not allow replacing the mm functions */
+/* #undef EVENT__DISABLE_MM_REPLACEMENT */
+
+/* Define if libevent should not be compiled with thread support */
+/* #undef EVENT__DISABLE_THREAD_SUPPORT */
+
+/* Define if clock_gettime is available in libc */
+/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define if no secure id variant is available */
+/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
+#define EVENT_DNS_USE_FTIME_FOR_ID_ 1
+
+/* Define to 1 if you have the <arpa/inet.h> header file. */
+/* #undef EVENT__HAVE_ARPA_INET_H */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef EVENT__HAVE_CLOCK_GETTIME */
+
+/* Define if /dev/poll is available */
+/* #undef EVENT__HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef EVENT__HAVE_DLFCN_H */
+
+/* Define if your system supports the epoll system calls */
+/* #undef EVENT__HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef EVENT__HAVE_EPOLL_CTL */
+
+/* Define to 1 if you have the `eventfd' function. */
+/* #undef EVENT__HAVE_EVENTFD */
+
+/* Define if your system supports event ports */
+/* #undef EVENT__HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef EVENT__HAVE_FCNTL */
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define EVENT__HAVE_FCNTL_H 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define EVENT__HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define EVENT__HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `getprotobynumber' function. */
+#define EVENT__HAVE_GETPROTOBYNUMBER 1
+
+/* Define to 1 if you have the `getservbyname' function. */
+#define EVENT__HAVE_GETSERVBYNAME 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #define EVENT__HAVE_GETTIMEOFDAY 1 */
+
+/* Define to 1 if you have the `inet_ntop' function. */
+/* #undef EVENT__HAVE_INET_NTOP */
+
+/* Define to 1 if you have the `inet_pton' function. */
+/* #undef EVENT__HAVE_INET_PTON */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #define EVENT__HAVE_INTTYPES_H 1 */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef EVENT__HAVE_KQUEUE */
+
+/* Define if the system has zlib */
+/* #undef EVENT__HAVE_LIBZ */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define EVENT__HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef EVENT__HAVE_MMAP */
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN_H */
+
+/* Define to 1 if you have the `pipe' function. */
+/* #undef EVENT__HAVE_PIPE */
+
+/* Define to 1 if you have the `poll' function. */
+/* #undef EVENT__HAVE_POLL */
+
+/* Define to 1 if you have the <poll.h> header file. */
+/* #undef EVENT__HAVE_POLL_H */
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef EVENT__HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef EVENT__HAVE_PORT_H */
+
+/* Define if you have POSIX threads libraries and header files. */
+/* #undef EVENT__HAVE_PTHREAD */
+
+/* Define if we have pthreads on this system */
+/* #undef EVENT__HAVE_PTHREADS */
+
+/* Define to 1 if the system has the type `sa_family_t'. */
+/* #undef EVENT__HAVE_SA_FAMILY_T */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef EVENT__HAVE_SELECT */
+
+/* Define to 1 if you have the `sendfile' function. */
+/* #undef EVENT__HAVE_SENDFILE */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+/* #undef EVENT__HAVE_SETFD */
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef EVENT__HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+#define EVENT__HAVE_SIGNAL 1
+
+/* Define to 1 if you have the `splice' function. */
+/* #undef EVENT__HAVE_SPLICE */
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define EVENT__HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define EVENT__HAVE_STDDEF_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #define EVENT__HAVE_STDINT_H 1 */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define EVENT__HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define EVENT__HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define EVENT__HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef EVENT__HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+/* #undef EVENT__HAVE_STRSEP */
+
+/* Define to 1 if you have the `strtok_r' function. */
+/* #undef EVENT__HAVE_STRTOK_R */
+
+/* Define to 1 if you have the `strtoll' function. */
+/* #define EVENT__HAVE_STRTOLL 1 */
+
+#define EVENT__HAVE_STRUCT_ADDRINFO 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1
+
+/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1
+
+/* Define to 1 if the system has the type `struct sockaddr_in6'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_IN6 1
+
+/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */
+
+/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
+
+/* Define to 1 if the system has the type `struct sockaddr_storage'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_STORAGE 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENTFD_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef EVENT__HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+/* #undef EVENT__HAVE_SYS_MMAN_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+/* #define EVENT__HAVE_SYS_PARAM_H 1 */
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+/* #undef EVENT__HAVE_SYS_QUEUE_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef EVENT__HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/sendfile.h> header file. */
+/* #undef EVENT__HAVE_SYS_SENDFILE_H */
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+/* #undef EVENT__HAVE_SYS_SOCKET_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define EVENT__HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #define EVENT__HAVE_SYS_TIME_H 1 */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define EVENT__HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <sys/uio.h> header file. */
+/* #undef EVENT__HAVE_SYS_UIO_H */
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+/* #undef EVENT__HAVE_TAILQFOREACH */
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+/* #define EVENT__HAVE_UINT16_T 1 */
+
+/* Define to 1 if the system has the type `uint32_t'. */
+/* #define EVENT__HAVE_UINT32_T 1 */
+
+/* Define to 1 if the system has the type `uint64_t'. */
+/* #define EVENT__HAVE_UINT64_T 1 */
+
+/* Define to 1 if the system has the type `uint8_t'. */
+/* #define EVENT__HAVE_UINT8_T 1 */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #define EVENT__HAVE_UNISTD_H 1 */
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef EVENT__HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef EVENT__HAVE_WORKING_KQUEUE */
+
+/* Numeric representation of the version */
+#define EVENT__NUMERIC_VERSION 0x02010500
+
+/* Name of package */
+#define EVENT__PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define EVENT__PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define EVENT__PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define EVENT__PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define EVENT__PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define EVENT__PACKAGE_VERSION ""
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+/* #undef EVENT__PTHREAD_CREATE_JOINABLE */
+
+/* The size of a `int', as computed by sizeof. */
+#define EVENT__SIZEOF_INT 4
+
+/* The size of a `long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG 4
+
+/* The size of a `long long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG_LONG 8
+
+/* The size of a `short', as computed by sizeof. */
+#define EVENT__SIZEOF_SHORT 2
+
+/* The size of `size_t', as computed by sizeof. */
+#ifdef _WIN64
+#define EVENT__SIZEOF_SIZE_T 8
+#else
+#define EVENT__SIZEOF_SIZE_T 4
+#endif
+
+/* The size of `void *', as computed by sizeof. */
+#ifdef _WIN64
+#define EVENT__SIZEOF_VOID_P 8
+#else
+#define EVENT__SIZEOF_VOID_P 4
+#endif
+
+/* Define to 1 if you have the ANSI C header files. */
+#define EVENT__STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define EVENT__TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define EVENT__VERSION "2.1.5-beta"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+#define EVENT____func__ __FUNCTION__
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef EVENT__const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef _EVENT___cplusplus
+#define EVENT__inline __inline
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef EVENT__pid_t */
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+/* #undef EVENT__size_t */
+
+/* Define to unsigned int if you don't have it */
+#define EVENT__socklen_t unsigned int
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__ssize_t SSIZE_T
+
+#endif
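
This hand-maintained copy of event2/event-config.h stands in for the autoconf-generated header on nmake/MSVC builds: compilation units test the EVENT__HAVE_* and EVENT__SIZEOF_* macros at preprocessing time instead of probing the system. The snippet below is only a sketch of that pattern, assuming the directory containing event2/ is on the include path; report_build_config() is invented for the example and is not a libevent function.

/* Illustrative only: typical consumption of the EVENT__* macros. */
#include <stdio.h>
#include "event2/event-config.h"

static void
report_build_config(void)
{
#ifdef EVENT__HAVE_STRLCPY
	printf("using the platform strlcpy()\n");
#else
	printf("using the bundled strlcpy.c replacement\n");
#endif
	printf("size_t: %d bytes, void *: %d bytes, version %s\n",
	    (int)EVENT__SIZEOF_SIZE_T, (int)EVENT__SIZEOF_VOID_P,
	    EVENT__VERSION);
}

int
main(void)
{
	report_build_config();
	return 0;
}
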
diff --git a/libs/libevent/src/WIN32-Code/tree.h b/libs/libevent/src/WIN32-Code/tree.h
new file mode 100644
index 0000000000..2ccfbf20ac
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/tree.h
@@ -0,0 +1,677 @@
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root))) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+struct type *name##_RB_REMOVE(struct name *, struct type *); \
+struct type *name##_RB_INSERT(struct name *, struct type *); \
+struct type *name##_RB_FIND(struct name *, struct type *); \
+struct type *name##_RB_NEXT(struct type *); \
+struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)))\
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)))\
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field))) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field))); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#endif /* _SYS_TREE_H_ */
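
The comment block at the top of tree.h describes the two intrusive containers these macros generate: splay trees and red-black trees. A minimal, self-contained sketch of the intended red-black usage follows: embed RB_ENTRY in the node type, generate the functions with RB_GENERATE, then insert and walk the tree. The intnode type, its comparator, and the include path are invented for this example and are not part of the diff.

/* Illustrative only: minimal red-black tree built with the macros above. */
#include <stdio.h>
#include <stdlib.h>
#include "tree.h"

struct intnode {
	int key;
	RB_ENTRY(intnode) entry;	/* embeds left/right/parent/color links */
};

static int
intnode_cmp(struct intnode *a, struct intnode *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

RB_HEAD(inttree, intnode);
RB_GENERATE(inttree, intnode, entry, intnode_cmp)

int
main(void)
{
	struct inttree head = RB_INITIALIZER(&head);
	int keys[] = { 5, 1, 9, 3 };
	struct intnode *n, *it;
	size_t i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		n = malloc(sizeof(*n));
		n->key = keys[i];
		RB_INSERT(inttree, &head, n);	/* NULL return means inserted */
	}
	RB_FOREACH(it, inttree, &head)	/* in-order walk: 1 3 5 9 */
		printf("%d ", it->key);
	printf("\n");
	return 0;
}
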
diff --git a/libs/libevent/src/arc4random.c b/libs/libevent/src/arc4random.c
new file mode 100644
index 0000000000..a2338e692a
--- /dev/null
+++ b/libs/libevent/src/arc4random.c
@@ -0,0 +1,556 @@
+/* Portable arc4random.c based on arc4random.c from OpenBSD.
+ * Portable version by Chris Davis, adapted for Libevent by Nick Mathewson
+ * Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson
+ * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
+ *
+ * Note that in Libevent, this file isn't compiled directly. Instead,
+ * it's included from evutil_rand.c
+ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Arc4 random number generator for OpenBSD.
+ *
+ * This code is derived from section 17.1 of Applied Cryptography,
+ * second edition, which describes a stream cipher allegedly
+ * compatible with RSA Labs "RC4" cipher (the actual description of
+ * which is a trade secret). The same algorithm is used as a stream
+ * cipher called "arcfour" in Tatu Ylonen's ssh package.
+ *
+ * Here the stream cipher has been modified always to include the time
+ * when initializing the state. That makes it impossible to
+ * regenerate the same random sequence twice, so this can't be used
+ * for encryption, but will generate good random numbers.
+ *
+ * RC4 is a registered trademark of RSA Laboratories.
+ */
+
+#ifndef ARC4RANDOM_EXPORT
+#define ARC4RANDOM_EXPORT
+#endif
+
+#ifndef ARC4RANDOM_UINT32
+#define ARC4RANDOM_UINT32 uint32_t
+#endif
+
+#ifndef ARC4RANDOM_NO_INCLUDES
+#include "evconfig-private.h"
+#ifdef _WIN32
+#include <wincrypt.h>
+#include <process.h>
+#else
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#ifdef EVENT__HAVE_SYS_SYSCTL_H
+#include <sys/sysctl.h>
+#endif
+#endif
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+/* Add platform entropy 32 bytes (256 bits) at a time. */
+#define ADD_ENTROPY 32
+
+/* Re-seed from the platform RNG after generating this many bytes. */
+#define BYTES_BEFORE_RESEED 1600000
+
+struct arc4_stream {
+ unsigned char i;
+ unsigned char j;
+ unsigned char s[256];
+};
+
+#ifdef _WIN32
+#define getpid _getpid
+#define pid_t int
+#endif
+
+static int rs_initialized;
+static struct arc4_stream rs;
+static pid_t arc4_stir_pid;
+static int arc4_count;
+static int arc4_seeded_ok;
+
+static inline unsigned char arc4_getbyte(void);
+
+static inline void
+arc4_init(void)
+{
+ int n;
+
+ for (n = 0; n < 256; n++)
+ rs.s[n] = n;
+ rs.i = 0;
+ rs.j = 0;
+}
+
+static inline void
+arc4_addrandom(const unsigned char *dat, int datlen)
+{
+ int n;
+ unsigned char si;
+
+ rs.i--;
+ for (n = 0; n < 256; n++) {
+ rs.i = (rs.i + 1);
+ si = rs.s[rs.i];
+ rs.j = (rs.j + si + dat[n % datlen]);
+ rs.s[rs.i] = rs.s[rs.j];
+ rs.s[rs.j] = si;
+ }
+ rs.j = rs.i;
+}
+
+#ifndef _WIN32
+static ssize_t
+read_all(int fd, unsigned char *buf, size_t count)
+{
+ size_t numread = 0;
+ ssize_t result;
+
+ while (numread < count) {
+ result = read(fd, buf+numread, count-numread);
+ if (result<0)
+ return -1;
+ else if (result == 0)
+ break;
+ numread += result;
+ }
+
+ return (ssize_t)numread;
+}
+#endif
+
+#ifdef _WIN32
+#define TRY_SEED_WIN32
+static int
+arc4_seed_win32(void)
+{
+ /* This is adapted from Tor's crypto_seed_rng() */
+ static int provider_set = 0;
+ static HCRYPTPROV provider;
+ unsigned char buf[ADD_ENTROPY];
+
+ if (!provider_set) {
+ if (!CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT)) {
+ if (GetLastError() != (DWORD)NTE_BAD_KEYSET)
+ return -1;
+ }
+ provider_set = 1;
+ }
+ if (!CryptGenRandom(provider, sizeof(buf), buf))
+ return -1;
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#if defined(EVENT__HAVE_SYS_SYSCTL_H) && defined(EVENT__HAVE_SYSCTL)
+#if EVENT__HAVE_DECL_CTL_KERN && EVENT__HAVE_DECL_KERN_RANDOM && EVENT__HAVE_DECL_RANDOM_UUID
+#define TRY_SEED_SYSCTL_LINUX
+static int
+arc4_seed_sysctl_linux(void)
+{
+ /* Based on code by William Ahern, this function tries to use the
+ * RANDOM_UUID sysctl to get entropy from the kernel. This can work
+ * even if /dev/urandom is inaccessible for some reason (e.g., we're
+ * running in a chroot). */
+ int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
+ unsigned char buf[ADD_ENTROPY];
+ size_t len, n;
+ unsigned i;
+ int any_set;
+
+ memset(buf, 0, sizeof(buf));
+
+ for (len = 0; len < sizeof(buf); len += n) {
+ n = sizeof(buf) - len;
+
+ if (0 != sysctl(mib, 3, &buf[len], &n, NULL, 0))
+ return -1;
+ }
+ /* make sure that the buffer actually got set. */
+ for (i=0,any_set=0; i<sizeof(buf); ++i) {
+ any_set |= buf[i];
+ }
+ if (!any_set)
+ return -1;
+
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#if EVENT__HAVE_DECL_CTL_KERN && EVENT__HAVE_DECL_KERN_ARND
+#define TRY_SEED_SYSCTL_BSD
+static int
+arc4_seed_sysctl_bsd(void)
+{
+ /* Based on code from William Ahern and from OpenBSD, this function
+ * tries to use the KERN_ARND syscall to get entropy from the kernel.
+ * This can work even if /dev/urandom is inaccessible for some reason
+ * (e.g., we're running in a chroot). */
+ int mib[] = { CTL_KERN, KERN_ARND };
+ unsigned char buf[ADD_ENTROPY];
+ size_t len, n;
+ int i, any_set;
+
+ memset(buf, 0, sizeof(buf));
+
+ len = sizeof(buf);
+ if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
+ for (len = 0; len < sizeof(buf); len += sizeof(unsigned)) {
+ n = sizeof(unsigned);
+ if (n + len > sizeof(buf))
+				n = sizeof(buf) - len;
+ if (sysctl(mib, 2, &buf[len], &n, NULL, 0) == -1)
+ return -1;
+ }
+ }
+ /* make sure that the buffer actually got set. */
+ for (i=any_set=0; i<sizeof(buf); ++i) {
+ any_set |= buf[i];
+ }
+ if (!any_set)
+ return -1;
+
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+#endif /* defined(EVENT__HAVE_SYS_SYSCTL_H) */
+
+#ifdef __linux__
+#define TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
+static int
+arc4_seed_proc_sys_kernel_random_uuid(void)
+{
+ /* Occasionally, somebody will make /proc/sys accessible in a chroot,
+ * but not /dev/urandom. Let's try /proc/sys/kernel/random/uuid.
+ * Its format is stupid, so we need to decode it from hex.
+ */
+ int fd;
+ char buf[128];
+ unsigned char entropy[64];
+ int bytes, n, i, nybbles;
+ for (bytes = 0; bytes<ADD_ENTROPY; ) {
+ fd = evutil_open_closeonexec_("/proc/sys/kernel/random/uuid", O_RDONLY, 0);
+ if (fd < 0)
+ return -1;
+ n = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (n<=0)
+ return -1;
+ memset(entropy, 0, sizeof(entropy));
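+		/* Pack each pair of hex digits from the uuid text into one
+		 * entropy byte, high nybble first; non-hex characters
+		 * (dashes, newline) are skipped. */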
+ for (i=nybbles=0; i<n; ++i) {
+ if (EVUTIL_ISXDIGIT_(buf[i])) {
+ int nyb = evutil_hex_char_to_int_(buf[i]);
+ if (nybbles & 1) {
+ entropy[nybbles/2] |= nyb;
+ } else {
+ entropy[nybbles/2] |= nyb<<4;
+ }
+ ++nybbles;
+ }
+ }
+ if (nybbles < 2)
+ return -1;
+ arc4_addrandom(entropy, nybbles/2);
+ bytes += nybbles/2;
+ }
+ evutil_memclear_(entropy, sizeof(entropy));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#ifndef _WIN32
+#define TRY_SEED_URANDOM
+static char *arc4random_urandom_filename = NULL;
+
+static int arc4_seed_urandom_helper_(const char *fname)
+{
+ unsigned char buf[ADD_ENTROPY];
+ int fd;
+ size_t n;
+
+ fd = evutil_open_closeonexec_(fname, O_RDONLY, 0);
+ if (fd<0)
+ return -1;
+ n = read_all(fd, buf, sizeof(buf));
+ close(fd);
+ if (n != sizeof(buf))
+ return -1;
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+
+static int
+arc4_seed_urandom(void)
+{
+ /* This is adapted from Tor's crypto_seed_rng() */
+ static const char *filenames[] = {
+ "/dev/srandom", "/dev/urandom", "/dev/random", NULL
+ };
+ int i;
+ if (arc4random_urandom_filename)
+ return arc4_seed_urandom_helper_(arc4random_urandom_filename);
+
+ for (i = 0; filenames[i]; ++i) {
+ if (arc4_seed_urandom_helper_(filenames[i]) == 0) {
+ return 0;
+ }
+ }
+
+ return -1;
+}
+#endif
+
+static int
+arc4_seed(void)
+{
+ int ok = 0;
+ /* We try every method that might work, and don't give up even if one
+ * does seem to work. There's no real harm in over-seeding, and if
+ * one of these sources turns out to be broken, that would be bad. */
+#ifdef TRY_SEED_WIN32
+ if (0 == arc4_seed_win32())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_URANDOM
+ if (0 == arc4_seed_urandom())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
+ if (arc4random_urandom_filename == NULL &&
+ 0 == arc4_seed_proc_sys_kernel_random_uuid())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_SYSCTL_LINUX
+ /* Apparently Linux is deprecating sysctl, and spewing warning
+ * messages when you try to use it. */
+ if (!ok && 0 == arc4_seed_sysctl_linux())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_SYSCTL_BSD
+ if (0 == arc4_seed_sysctl_bsd())
+ ok = 1;
+#endif
+ return ok ? 0 : -1;
+}
+
+static int
+arc4_stir(void)
+{
+ int i;
+
+ if (!rs_initialized) {
+ arc4_init();
+ rs_initialized = 1;
+ }
+
+ arc4_seed();
+ if (!arc4_seeded_ok)
+ return -1;
+
+ /*
+ * Discard early keystream, as per recommendations in
+ * "Weaknesses in the Key Scheduling Algorithm of RC4" by
+ * Scott Fluhrer, Itsik Mantin, and Adi Shamir.
+ * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
+ *
+ * Ilya Mironov's "(Not So) Random Shuffles of RC4" suggests that
+ * we drop at least 2*256 bytes, with 12*256 as a conservative
+ * value.
+ *
+ * RFC4345 says to drop 6*256.
+ *
+ * At least some versions of this code drop 4*256, in a mistaken
+ * belief that "words" in the Fluhrer/Mantin/Shamir paper refers
+ * to processor words.
+ *
+ * We add another sect to the cargo cult, and choose 12*256.
+ */
+ for (i = 0; i < 12*256; i++)
+ (void)arc4_getbyte();
+
+ arc4_count = BYTES_BEFORE_RESEED;
+
+ return 0;
+}
+
+
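+/* Re-stir (re-seed) the generator on first use, after a fork (the pid has
+ * changed), or once the output budget from the previous stir is spent. */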
+static void
+arc4_stir_if_needed(void)
+{
+ pid_t pid = getpid();
+
+ if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != pid)
+ {
+ arc4_stir_pid = pid;
+ arc4_stir();
+ }
+}
+
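+/* Produce one byte of RC4 keystream (the PRGA step). */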
+static inline unsigned char
+arc4_getbyte(void)
+{
+ unsigned char si, sj;
+
+ rs.i = (rs.i + 1);
+ si = rs.s[rs.i];
+ rs.j = (rs.j + si);
+ sj = rs.s[rs.j];
+ rs.s[rs.i] = sj;
+ rs.s[rs.j] = si;
+ return (rs.s[(si + sj) & 0xff]);
+}
+
+static inline unsigned int
+arc4_getword(void)
+{
+ unsigned int val;
+
+	val = ((unsigned int)arc4_getbyte()) << 24;
+ val |= arc4_getbyte() << 16;
+ val |= arc4_getbyte() << 8;
+ val |= arc4_getbyte();
+
+ return val;
+}
+
+#ifndef ARC4RANDOM_NOSTIR
+ARC4RANDOM_EXPORT int
+arc4random_stir(void)
+{
+ int val;
+ ARC4_LOCK_();
+ val = arc4_stir();
+ ARC4_UNLOCK_();
+ return val;
+}
+#endif
+
+#ifndef ARC4RANDOM_NOADDRANDOM
+ARC4RANDOM_EXPORT void
+arc4random_addrandom(const unsigned char *dat, int datlen)
+{
+ int j;
+ ARC4_LOCK_();
+ if (!rs_initialized)
+ arc4_stir();
+ for (j = 0; j < datlen; j += 256) {
+ /* arc4_addrandom() ignores all but the first 256 bytes of
+ * its input. We want to make sure to look at ALL the
+ * data in 'dat', just in case the user is doing something
+ * crazy like passing us all the files in /var/log. */
+ arc4_addrandom(dat + j, datlen - j);
+ }
+ ARC4_UNLOCK_();
+}
+#endif
+
+#ifndef ARC4RANDOM_NORANDOM
+ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
+arc4random(void)
+{
+ ARC4RANDOM_UINT32 val;
+ ARC4_LOCK_();
+ arc4_count -= 4;
+ arc4_stir_if_needed();
+ val = arc4_getword();
+ ARC4_UNLOCK_();
+ return val;
+}
+#endif
+
+ARC4RANDOM_EXPORT void
+arc4random_buf(void *buf_, size_t n)
+{
+ unsigned char *buf = buf_;
+ ARC4_LOCK_();
+ arc4_stir_if_needed();
+ while (n--) {
+ if (--arc4_count <= 0)
+ arc4_stir();
+ buf[n] = arc4_getbyte();
+ }
+ ARC4_UNLOCK_();
+}
+
+#ifndef ARC4RANDOM_NOUNIFORM
+/*
+ * Calculate a uniformly distributed random number less than upper_bound
+ * avoiding "modulo bias".
+ *
+ * Uniformity is achieved by generating new random numbers until the one
+ * returned is outside the range [0, 2**32 % upper_bound). This
+ * guarantees the selected random number will be inside
+ * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
+ * after reduction modulo upper_bound.
+ */
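+/*
+ * Worked example: with upper_bound == 10, min == 2**32 % 10 == 6, so the
+ * values 0..5 are re-rolled; the remaining 2**32 - 6 results are an exact
+ * multiple of 10 and therefore map uniformly onto 0..9 via r % 10.
+ */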
+ARC4RANDOM_EXPORT unsigned int
+arc4random_uniform(unsigned int upper_bound)
+{
+ ARC4RANDOM_UINT32 r, min;
+
+ if (upper_bound < 2)
+ return 0;
+
+#if (UINT_MAX > 0xffffffffUL)
+ min = 0x100000000UL % upper_bound;
+#else
+ /* Calculate (2**32 % upper_bound) avoiding 64-bit math */
+ if (upper_bound > 0x80000000)
+ min = 1 + ~upper_bound; /* 2**32 - upper_bound */
+ else {
+ /* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */
+ min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound;
+ }
+#endif
+
+ /*
+ * This could theoretically loop forever but each retry has
+ * p > 0.5 (worst case, usually far better) of selecting a
+ * number inside the range we need, so it should rarely need
+ * to re-roll.
+ */
+ for (;;) {
+ r = arc4random();
+ if (r >= min)
+ break;
+ }
+
+ return r % upper_bound;
+}
+#endif
diff --git a/libs/libevent/src/buffer.c b/libs/libevent/src/buffer.c
new file mode 100644
index 0000000000..7cca0e8a7d
--- /dev/null
+++ b/libs/libevent/src/buffer.c
@@ -0,0 +1,3439 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <io.h>
+#endif
+
+#ifdef EVENT__HAVE_VASPRINTF
+/* If we have vasprintf, we need to define _GNU_SOURCE before we include
+ * stdio.h. This comes from evconfig-private.h.
+ */
+#endif
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SENDFILE_H
+#include <sys/sendfile.h>
+#endif
+#ifdef EVENT__HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/thread.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#include "evbuffer-internal.h"
+#include "bufferevent-internal.h"
+
+/* some systems do not have MAP_FAILED */
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+/* send file support */
+#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_LINUX 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_FREEBSD 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_MACOSX 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_SOLARIS 1
+#endif
+
+/* Mask of user-selectable callback flags. */
+#define EVBUFFER_CB_USER_FLAGS 0xffff
+/* Mask of all internal-use-only flags. */
+#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
+
+/* Flag set if the callback is using the cb_obsolete function pointer */
+#define EVBUFFER_CB_OBSOLETE 0x00040000
+
+/* evbuffer_chain support */
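+/* CHAIN_SPACE_PTR() is the first writable byte after the data already in a
+ * chain; CHAIN_SPACE_LEN() is how many bytes can still be appended there
+ * (always 0 for immutable chains). */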
+#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
+#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
+ 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
+
+#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
+#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
+
+/* evbuffer_ptr support */
+#define PTR_NOT_FOUND(ptr) do { \
+ (ptr)->pos = -1; \
+ (ptr)->internal_.chain = NULL; \
+ (ptr)->internal_.pos_in_chain = 0; \
+} while (0)
+
+static void evbuffer_chain_align(struct evbuffer_chain *chain);
+static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
+ size_t datalen);
+static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
+static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
+ const struct evbuffer_ptr *pos, const char *mem, size_t len);
+static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
+ size_t datlen);
+static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t howfar);
+static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
+static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);
+
+static struct evbuffer_chain *
+evbuffer_chain_new(size_t size)
+{
+ struct evbuffer_chain *chain;
+ size_t to_alloc;
+
+ if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
+ return (NULL);
+
+ size += EVBUFFER_CHAIN_SIZE;
+
+	/* Round the allocation up by doubling from MIN_BUFFER_SIZE until it
+	 * is big enough for the chain header plus the requested buffer. */
+ if (size < EVBUFFER_CHAIN_MAX / 2) {
+ to_alloc = MIN_BUFFER_SIZE;
+ while (to_alloc < size) {
+ to_alloc <<= 1;
+ }
+ } else {
+ to_alloc = size;
+ }
+
+ /* we get everything in one chunk */
+ if ((chain = mm_malloc(to_alloc)) == NULL)
+ return (NULL);
+
+ memset(chain, 0, EVBUFFER_CHAIN_SIZE);
+
+ chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
+
+	/* this way chain->buffer can later be pointed at different memory,
+	 * which is required for mmap for example.
+ */
+ chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);
+
+ chain->refcnt = 1;
+
+ return (chain);
+}
+
+static inline void
+evbuffer_chain_free(struct evbuffer_chain *chain)
+{
+ EVUTIL_ASSERT(chain->refcnt > 0);
+ if (--chain->refcnt > 0) {
+ /* chain is still referenced by other chains */
+ return;
+ }
+
+ if (CHAIN_PINNED(chain)) {
+ /* will get freed once no longer dangling */
+ chain->refcnt++;
+ chain->flags |= EVBUFFER_DANGLING;
+ return;
+ }
+
+	/* Safe to release the chain: either it is a referencing chain,
+	 * or all references to it have been freed. */
+ if (chain->flags & EVBUFFER_REFERENCE) {
+ struct evbuffer_chain_reference *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_chain_reference,
+ chain);
+ if (info->cleanupfn)
+ (*info->cleanupfn)(chain->buffer,
+ chain->buffer_len,
+ info->extra);
+ }
+ if (chain->flags & EVBUFFER_FILESEGMENT) {
+ struct evbuffer_chain_file_segment *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_chain_file_segment,
+ chain);
+ if (info->segment) {
+#ifdef _WIN32
+ if (info->segment->is_mapping)
+ UnmapViewOfFile(chain->buffer);
+#endif
+ evbuffer_file_segment_free(info->segment);
+ }
+ }
+ if (chain->flags & EVBUFFER_MULTICAST) {
+ struct evbuffer_multicast_parent *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_multicast_parent,
+ chain);
+ /* referencing chain is being freed, decrease
+ * refcounts of source chain and associated
+ * evbuffer (which get freed once both reach
+ * zero) */
+ EVUTIL_ASSERT(info->source != NULL);
+ EVUTIL_ASSERT(info->parent != NULL);
+ EVBUFFER_LOCK(info->source);
+ evbuffer_chain_free(info->parent);
+ evbuffer_decref_and_unlock_(info->source);
+ }
+
+ mm_free(chain);
+}
+
+static void
+evbuffer_free_all_chains(struct evbuffer_chain *chain)
+{
+ struct evbuffer_chain *next;
+ for (; chain; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+}
+
+#ifndef NDEBUG
+static int
+evbuffer_chains_all_empty(struct evbuffer_chain *chain)
+{
+ for (; chain; chain = chain->next) {
+ if (chain->off)
+ return 0;
+ }
+ return 1;
+}
+#else
+/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
+"unused variable" warnings. */
+static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
+ return 1;
+}
+#endif
+
+/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
+ * to replacing them all with a new chain. Return a pointer to the place
+ * where the new chain will go.
+ *
+ * Internal; requires lock. The caller must fix up buf->last and buf->first
+ * as needed; they might have been freed.
+ */
+static struct evbuffer_chain **
+evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
+{
+ struct evbuffer_chain **ch = buf->last_with_datap;
+ /* Find the first victim chain. It might be *last_with_datap */
+ while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
+ ch = &(*ch)->next;
+ if (*ch) {
+ EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
+ evbuffer_free_all_chains(*ch);
+ *ch = NULL;
+ }
+ return ch;
+}
+
+/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
+ * chains as necessary. Requires lock. Does not schedule callbacks.
+ */
+static void
+evbuffer_chain_insert(struct evbuffer *buf,
+ struct evbuffer_chain *chain)
+{
+ ASSERT_EVBUFFER_LOCKED(buf);
+ if (*buf->last_with_datap == NULL) {
+		/* There are no chains with data on the buffer at all. */
+ EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
+ EVUTIL_ASSERT(buf->first == NULL);
+ buf->first = buf->last = chain;
+ } else {
+ struct evbuffer_chain **chp;
+ chp = evbuffer_free_trailing_empty_chains(buf);
+ *chp = chain;
+ if (chain->off)
+ buf->last_with_datap = chp;
+ buf->last = chain;
+ }
+ buf->total_len += chain->off;
+}
+
+static inline struct evbuffer_chain *
+evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain;
+ if ((chain = evbuffer_chain_new(datlen)) == NULL)
+ return NULL;
+ evbuffer_chain_insert(buf, chain);
+ return chain;
+}
+
+void
+evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
+{
+ EVUTIL_ASSERT((chain->flags & flag) == 0);
+ chain->flags |= flag;
+}
+
+void
+evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
+{
+ EVUTIL_ASSERT((chain->flags & flag) != 0);
+ chain->flags &= ~flag;
+ if (chain->flags & EVBUFFER_DANGLING)
+ evbuffer_chain_free(chain);
+}
+
+static inline void
+evbuffer_chain_incref(struct evbuffer_chain *chain)
+{
+ ++chain->refcnt;
+}
+
+struct evbuffer *
+evbuffer_new(void)
+{
+ struct evbuffer *buffer;
+
+ buffer = mm_calloc(1, sizeof(struct evbuffer));
+ if (buffer == NULL)
+ return (NULL);
+
+ LIST_INIT(&buffer->callbacks);
+ buffer->refcnt = 1;
+ buffer->last_with_datap = &buffer->first;
+
+ return (buffer);
+}
+
+int
+evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
+{
+ EVBUFFER_LOCK(buf);
+ buf->flags |= (ev_uint32_t)flags;
+ EVBUFFER_UNLOCK(buf);
+ return 0;
+}
+
+int
+evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
+{
+ EVBUFFER_LOCK(buf);
+ buf->flags &= ~(ev_uint32_t)flags;
+ EVBUFFER_UNLOCK(buf);
+ return 0;
+}
+
+void
+evbuffer_incref_(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+ ++buf->refcnt;
+ EVBUFFER_UNLOCK(buf);
+}
+
+void
+evbuffer_incref_and_lock_(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+ ++buf->refcnt;
+}
+
+int
+evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
+{
+ EVBUFFER_LOCK(buffer);
+ buffer->cb_queue = base;
+ buffer->deferred_cbs = 1;
+ event_deferred_cb_init_(&buffer->deferred,
+ event_base_get_npriorities(base) / 2,
+ evbuffer_deferred_callback, buffer);
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_enable_locking(struct evbuffer *buf, void *lock)
+{
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+ return -1;
+#else
+ if (buf->lock)
+ return -1;
+
+ if (!lock) {
+ EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock)
+ return -1;
+ buf->lock = lock;
+ buf->own_lock = 1;
+ } else {
+ buf->lock = lock;
+ buf->own_lock = 0;
+ }
+
+ return 0;
+#endif
+}
+
+void
+evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
+{
+ EVBUFFER_LOCK(buf);
+ buf->parent = bev;
+ EVBUFFER_UNLOCK(buf);
+}
+
+static void
+evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
+{
+ struct evbuffer_cb_entry *cbent, *next;
+ struct evbuffer_cb_info info;
+ size_t new_size;
+ ev_uint32_t mask, masked_val;
+ int clear = 1;
+
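+	/* Decide which callbacks may run in this pass: the deferred pass runs
+	 * every enabled callback that is not marked EVBUFFER_CB_NODEFER (those
+	 * already ran inline); an inline pass on a deferring buffer runs only
+	 * the enabled NODEFER callbacks; otherwise all enabled callbacks run. */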
+ if (running_deferred) {
+ mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_ENABLED;
+ } else if (buffer->deferred_cbs) {
+ mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ /* Don't zero-out n_add/n_del, since the deferred callbacks
+ will want to see them. */
+ clear = 0;
+ } else {
+ mask = EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_ENABLED;
+ }
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+ if (LIST_EMPTY(&buffer->callbacks)) {
+ buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
+ return;
+ }
+ if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
+ return;
+
+ new_size = buffer->total_len;
+ info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
+ info.n_added = buffer->n_add_for_cb;
+ info.n_deleted = buffer->n_del_for_cb;
+ if (clear) {
+ buffer->n_add_for_cb = 0;
+ buffer->n_del_for_cb = 0;
+ }
+ for (cbent = LIST_FIRST(&buffer->callbacks);
+ cbent != LIST_END(&buffer->callbacks);
+ cbent = next) {
+ /* Get the 'next' pointer now in case this callback decides
+ * to remove itself or something. */
+ next = LIST_NEXT(cbent, next);
+
+ if ((cbent->flags & mask) != masked_val)
+ continue;
+
+ if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
+ cbent->cb.cb_obsolete(buffer,
+ info.orig_size, new_size, cbent->cbarg);
+ else
+ cbent->cb.cb_func(buffer, &info, cbent->cbarg);
+ }
+}
+
+void
+evbuffer_invoke_callbacks_(struct evbuffer *buffer)
+{
+ if (LIST_EMPTY(&buffer->callbacks)) {
+ buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
+ return;
+ }
+
+ if (buffer->deferred_cbs) {
+ if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
+ evbuffer_incref_and_lock_(buffer);
+ if (buffer->parent)
+ bufferevent_incref_(buffer->parent);
+ }
+ EVBUFFER_UNLOCK(buffer);
+ }
+
+ evbuffer_run_callbacks(buffer, 0);
+}
+
+static void
+evbuffer_deferred_callback(struct event_callback *cb, void *arg)
+{
+ struct bufferevent *parent = NULL;
+ struct evbuffer *buffer = arg;
+
+ /* XXXX It would be better to run these callbacks without holding the
+ * lock */
+ EVBUFFER_LOCK(buffer);
+ parent = buffer->parent;
+ evbuffer_run_callbacks(buffer, 1);
+ evbuffer_decref_and_unlock_(buffer);
+ if (parent)
+ bufferevent_decref_(parent);
+}
+
+static void
+evbuffer_remove_all_callbacks(struct evbuffer *buffer)
+{
+ struct evbuffer_cb_entry *cbent;
+
+ while ((cbent = LIST_FIRST(&buffer->callbacks))) {
+ LIST_REMOVE(cbent, next);
+ mm_free(cbent);
+ }
+}
+
+void
+evbuffer_decref_and_unlock_(struct evbuffer *buffer)
+{
+ struct evbuffer_chain *chain, *next;
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+ EVUTIL_ASSERT(buffer->refcnt > 0);
+
+ if (--buffer->refcnt > 0) {
+ EVBUFFER_UNLOCK(buffer);
+ return;
+ }
+
+ for (chain = buffer->first; chain != NULL; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+ evbuffer_remove_all_callbacks(buffer);
+ if (buffer->deferred_cbs)
+ event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);
+
+ EVBUFFER_UNLOCK(buffer);
+ if (buffer->own_lock)
+ EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(buffer);
+}
+
+void
+evbuffer_free(struct evbuffer *buffer)
+{
+ EVBUFFER_LOCK(buffer);
+ evbuffer_decref_and_unlock_(buffer);
+}
+
+void
+evbuffer_lock(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+}
+
+void
+evbuffer_unlock(struct evbuffer *buf)
+{
+ EVBUFFER_UNLOCK(buf);
+}
+
+size_t
+evbuffer_get_length(const struct evbuffer *buffer)
+{
+ size_t result;
+
+ EVBUFFER_LOCK(buffer);
+
+ result = (buffer->total_len);
+
+ EVBUFFER_UNLOCK(buffer);
+
+ return result;
+}
+
+size_t
+evbuffer_get_contiguous_space(const struct evbuffer *buf)
+{
+ struct evbuffer_chain *chain;
+ size_t result;
+
+ EVBUFFER_LOCK(buf);
+ chain = buf->first;
+ result = (chain != NULL ? chain->off : 0);
+ EVBUFFER_UNLOCK(buf);
+
+ return result;
+}
+
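+/* Append the memory described by 'n_vec' iovecs to 'buf'.  The total size
+ * is reserved up front (in at most two chains) so the per-vector copies
+ * below do not have to grow the buffer repeatedly.  Returns the number of
+ * bytes actually added. */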
+size_t
+evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
+ int n;
+ size_t res;
+ size_t to_alloc;
+
+ EVBUFFER_LOCK(buf);
+
+ res = to_alloc = 0;
+
+ for (n = 0; n < n_vec; n++) {
+ to_alloc += vec[n].iov_len;
+ }
+
+ if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
+ goto done;
+ }
+
+ for (n = 0; n < n_vec; n++) {
+ /* XXX each 'add' call here does a bunch of setup that's
+ * obviated by evbuffer_expand_fast_, and some cleanup that we
+ * would like to do only once. Instead we should just extract
+ * the part of the code that's needed. */
+
+ if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
+ goto done;
+ }
+
+ res += vec[n].iov_len;
+ }
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return res;
+}
+
+int
+evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
+ struct evbuffer_iovec *vec, int n_vecs)
+{
+ struct evbuffer_chain *chain, **chainp;
+ int n = -1;
+
+ EVBUFFER_LOCK(buf);
+ if (buf->freeze_end)
+ goto done;
+ if (n_vecs < 1)
+ goto done;
+ if (n_vecs == 1) {
+ if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
+ goto done;
+
+ vec[0].iov_base = CHAIN_SPACE_PTR(chain);
+ vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
+ EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
+ n = 1;
+ } else {
+ if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
+ goto done;
+ n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
+ &chainp, 0);
+ }
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return n;
+
+}
+
+static int
+advance_last_with_data(struct evbuffer *buf)
+{
+ int n = 0;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (!*buf->last_with_datap)
+ return 0;
+
+ while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
+ buf->last_with_datap = &(*buf->last_with_datap)->next;
+ ++n;
+ }
+ return n;
+}
+
+int
+evbuffer_commit_space(struct evbuffer *buf,
+ struct evbuffer_iovec *vec, int n_vecs)
+{
+ struct evbuffer_chain *chain, **firstchainp, **chainp;
+ int result = -1;
+ size_t added = 0;
+ int i;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end)
+ goto done;
+ if (n_vecs == 0) {
+ result = 0;
+ goto done;
+ } else if (n_vecs == 1 &&
+ (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
+ /* The user only got or used one chain; it might not
+ * be the first one with space in it. */
+ if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
+ goto done;
+ buf->last->off += vec[0].iov_len;
+ added = vec[0].iov_len;
+ if (added)
+ advance_last_with_data(buf);
+ goto okay;
+ }
+
+ /* Advance 'firstchain' to the first chain with space in it. */
+ firstchainp = buf->last_with_datap;
+ if (!*firstchainp)
+ goto done;
+ if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
+ firstchainp = &(*firstchainp)->next;
+ }
+
+ chain = *firstchainp;
+ /* pass 1: make sure that the pointers and lengths of vecs[] are in
+ * bounds before we try to commit anything. */
+ for (i=0; i<n_vecs; ++i) {
+ if (!chain)
+ goto done;
+ if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
+ (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
+ goto done;
+ chain = chain->next;
+ }
+ /* pass 2: actually adjust all the chains. */
+ chainp = firstchainp;
+ for (i=0; i<n_vecs; ++i) {
+ (*chainp)->off += vec[i].iov_len;
+ added += vec[i].iov_len;
+ if (vec[i].iov_len) {
+ buf->last_with_datap = chainp;
+ }
+ chainp = &(*chainp)->next;
+ }
+
+okay:
+ buf->total_len += added;
+ buf->n_add_for_cb += added;
+ result = 0;
+ evbuffer_invoke_callbacks_(buf);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
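+
+/* Typical use of the reserve/commit pair (an illustrative sketch, not part
+ * of the upstream source): reserve space, write into the returned iovec
+ * directly, shrink iov_len to what was actually produced, then commit:
+ *
+ *	struct evbuffer_iovec v[2];
+ *	int n = evbuffer_reserve_space(buf, 4096, v, 2);
+ *	if (n > 0) {
+ *		size_t written = produce_data(v[0].iov_base, v[0].iov_len);
+ *		v[0].iov_len = written;
+ *		evbuffer_commit_space(buf, v, 1);
+ *	}
+ *
+ * 'produce_data' is a hypothetical helper that fills the region and returns
+ * how many bytes it wrote. */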
+
+static inline int
+HAS_PINNED_R(struct evbuffer *buf)
+{
+ return (buf->last && CHAIN_PINNED_R(buf->last));
+}
+
+static inline void
+ZERO_CHAIN(struct evbuffer *dst)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ dst->first = NULL;
+ dst->last = NULL;
+ dst->last_with_datap = &(dst)->first;
+ dst->total_len = 0;
+}
+
+/* Prepares the contents of src to be moved to another buffer by removing
+ * read-pinned chains. The first pinned chain is saved in first, and the
+ * last in last. If src has no read-pinned chains, first and last are set
+ * to NULL. */
+static int
+PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
+ struct evbuffer_chain **last)
+{
+ struct evbuffer_chain *chain, **pinned;
+
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ if (!HAS_PINNED_R(src)) {
+ *first = *last = NULL;
+ return 0;
+ }
+
+ pinned = src->last_with_datap;
+ if (!CHAIN_PINNED_R(*pinned))
+ pinned = &(*pinned)->next;
+ EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
+ chain = *first = *pinned;
+ *last = src->last;
+
+ /* If there's data in the first pinned chain, we need to allocate
+ * a new chain and copy the data over. */
+ if (chain->off) {
+ struct evbuffer_chain *tmp;
+
+ EVUTIL_ASSERT(pinned == src->last_with_datap);
+ tmp = evbuffer_chain_new(chain->off);
+ if (!tmp)
+ return -1;
+ memcpy(tmp->buffer, chain->buffer + chain->misalign,
+ chain->off);
+ tmp->off = chain->off;
+ *src->last_with_datap = tmp;
+ src->last = tmp;
+ chain->misalign += chain->off;
+ chain->off = 0;
+ } else {
+ src->last = *src->last_with_datap;
+ *pinned = NULL;
+ }
+
+ return 0;
+}
+
+static inline void
+RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
+ struct evbuffer_chain *last)
+{
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ if (!pinned) {
+ ZERO_CHAIN(src);
+ return;
+ }
+
+ src->first = pinned;
+ src->last = last;
+ src->last_with_datap = &src->first;
+ src->total_len = 0;
+}
+
+static inline void
+COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ dst->first = src->first;
+ if (src->last_with_datap == &src->first)
+ dst->last_with_datap = &dst->first;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ dst->last = src->last;
+ dst->total_len = src->total_len;
+}
+
+static void
+APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ dst->last->next = src->first;
+ if (src->last_with_datap == &src->first)
+ dst->last_with_datap = &dst->last->next;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ dst->last = src->last;
+ dst->total_len += src->total_len;
+}
+
+static inline void
+APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
+{
+ struct evbuffer_chain *tmp;
+ struct evbuffer_chain *chain = src->first;
+ struct evbuffer_multicast_parent *extra;
+
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ for (; chain; chain = chain->next) {
+ if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
+ /* skip empty chains */
+ continue;
+ }
+
+ tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
+ if (!tmp) {
+ event_warn("%s: out of memory", __func__);
+ return;
+ }
+ extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
+ /* reference evbuffer containing source chain so it
+ * doesn't get released while the chain is still
+ * being referenced to */
+ evbuffer_incref_(src);
+ extra->source = src;
+ /* reference source chain which now becomes immutable */
+ evbuffer_chain_incref(chain);
+ extra->parent = chain;
+ chain->flags |= EVBUFFER_IMMUTABLE;
+ tmp->buffer_len = chain->buffer_len;
+ tmp->misalign = chain->misalign;
+ tmp->off = chain->off;
+ tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
+ tmp->buffer = chain->buffer;
+ evbuffer_chain_insert(dst, tmp);
+ }
+}
+
+static void
+PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ src->last->next = dst->first;
+ dst->first = src->first;
+ dst->total_len += src->total_len;
+ if (*dst->last_with_datap == NULL) {
+ if (src->last_with_datap == &(src)->first)
+ dst->last_with_datap = &dst->first;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ } else if (dst->last_with_datap == &dst->first) {
+ dst->last_with_datap = &src->last->next;
+ }
+}
+
+int
+evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ struct evbuffer_chain *pinned, *last;
+ size_t in_total_len, out_total_len;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+
+ if (in_total_len == 0 || outbuf == inbuf)
+ goto done;
+
+ if (outbuf->freeze_end || inbuf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
+ result = -1;
+ goto done;
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ COPY_CHAIN(outbuf, inbuf);
+ } else {
+ APPEND_CHAIN(outbuf, inbuf);
+ }
+
+ RESTORE_PINNED(inbuf, pinned, last);
+
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
+
+ evbuffer_invoke_callbacks_(inbuf);
+ evbuffer_invoke_callbacks_(outbuf);
+
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
+
+int
+evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ size_t in_total_len, out_total_len;
+ struct evbuffer_chain *chain;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+ chain = inbuf->first;
+
+ if (in_total_len == 0)
+ goto done;
+
+ if (outbuf->freeze_end || outbuf == inbuf) {
+ result = -1;
+ goto done;
+ }
+
+ for (; chain; chain = chain->next) {
+ if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
+ /* chain type can not be referenced */
+ result = -1;
+ goto done;
+ }
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ }
+ APPEND_CHAIN_MULTICAST(outbuf, inbuf);
+
+ outbuf->n_add_for_cb += in_total_len;
+ evbuffer_invoke_callbacks_(outbuf);
+
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
+
+int
+evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ struct evbuffer_chain *pinned, *last;
+ size_t in_total_len, out_total_len;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+
+ if (!in_total_len || inbuf == outbuf)
+ goto done;
+
+ if (outbuf->freeze_start || inbuf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
+ result = -1;
+ goto done;
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ COPY_CHAIN(outbuf, inbuf);
+ } else {
+ PREPEND_CHAIN(outbuf, inbuf);
+ }
+
+ RESTORE_PINNED(inbuf, pinned, last);
+
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
+
+ evbuffer_invoke_callbacks_(inbuf);
+ evbuffer_invoke_callbacks_(outbuf);
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
+
+int
+evbuffer_drain(struct evbuffer *buf, size_t len)
+{
+ struct evbuffer_chain *chain, *next;
+ size_t remaining, old_len;
+ int result = 0;
+
+ EVBUFFER_LOCK(buf);
+ old_len = buf->total_len;
+
+ if (old_len == 0)
+ goto done;
+
+ if (buf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (len >= old_len && !HAS_PINNED_R(buf)) {
+ len = old_len;
+ for (chain = buf->first; chain != NULL; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+
+ ZERO_CHAIN(buf);
+ } else {
+ if (len >= old_len)
+ len = old_len;
+
+ buf->total_len -= len;
+ remaining = len;
+ for (chain = buf->first;
+ remaining >= chain->off;
+ chain = next) {
+ next = chain->next;
+ remaining -= chain->off;
+
+ if (chain == *buf->last_with_datap) {
+ buf->last_with_datap = &buf->first;
+ }
+ if (&chain->next == buf->last_with_datap)
+ buf->last_with_datap = &buf->first;
+
+ if (CHAIN_PINNED_R(chain)) {
+ EVUTIL_ASSERT(remaining == 0);
+ chain->misalign += chain->off;
+ chain->off = 0;
+ break;
+ } else
+ evbuffer_chain_free(chain);
+ }
+
+ buf->first = chain;
+ EVUTIL_ASSERT(chain && remaining <= chain->off);
+ chain->misalign += remaining;
+ chain->off -= remaining;
+ }
+
+ buf->n_del_for_cb += len;
+ /* Tell someone about changes in this buffer */
+ evbuffer_invoke_callbacks_(buf);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/* Reads data from an event buffer and drains the bytes read */
+int
+evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
+{
+ ev_ssize_t n;
+ EVBUFFER_LOCK(buf);
+ n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
+ if (n > 0) {
+ if (evbuffer_drain(buf, n)<0)
+ n = -1;
+ }
+ EVBUFFER_UNLOCK(buf);
+ return (int)n;
+}
+
+ev_ssize_t
+evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
+{
+ return evbuffer_copyout_from(buf, NULL, data_out, datlen);
+}
+
+ev_ssize_t
+evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
+ void *data_out, size_t datlen)
+{
+ /*XXX fails badly on sendfile case. */
+ struct evbuffer_chain *chain;
+ char *data = data_out;
+ size_t nread;
+ ev_ssize_t result = 0;
+ size_t pos_in_chain;
+
+ EVBUFFER_LOCK(buf);
+
+ if (pos) {
+ if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
+ result = -1;
+ goto done;
+ }
+ chain = pos->internal_.chain;
+ pos_in_chain = pos->internal_.pos_in_chain;
+ if (datlen + pos->pos > buf->total_len)
+ datlen = buf->total_len - pos->pos;
+ } else {
+ chain = buf->first;
+ pos_in_chain = 0;
+ if (datlen > buf->total_len)
+ datlen = buf->total_len;
+ }
+
+
+ if (datlen == 0)
+ goto done;
+
+ if (buf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ nread = datlen;
+
+ while (datlen && datlen >= chain->off - pos_in_chain) {
+ size_t copylen = chain->off - pos_in_chain;
+ memcpy(data,
+ chain->buffer + chain->misalign + pos_in_chain,
+ copylen);
+ data += copylen;
+ datlen -= copylen;
+
+ chain = chain->next;
+ pos_in_chain = 0;
+ EVUTIL_ASSERT(chain || datlen==0);
+ }
+
+ if (datlen) {
+ EVUTIL_ASSERT(chain);
+ EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);
+
+ memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
+ datlen);
+ }
+
+ result = nread;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/* Moves data from the src buffer to the dst buffer, avoiding memcpy where
+ * possible. */
+/* XXXX should return ev_ssize_t */
+int
+evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
+ size_t datlen)
+{
+ /*XXX We should have an option to force this to be zero-copy.*/
+
+ /*XXX can fail badly on sendfile case. */
+ struct evbuffer_chain *chain, *previous;
+ size_t nread = 0;
+ int result;
+
+ EVBUFFER_LOCK2(src, dst);
+
+ chain = previous = src->first;
+
+ if (datlen == 0 || dst == src) {
+ result = 0;
+ goto done;
+ }
+
+ if (dst->freeze_end || src->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+	/* short-cut if we were asked for all (or more than) the buffered data */
+ if (datlen >= src->total_len) {
+ datlen = src->total_len;
+ evbuffer_add_buffer(dst, src);
+ result = (int)datlen; /*XXXX should return ev_ssize_t*/
+ goto done;
+ }
+
+ /* removes chains if possible */
+ while (chain->off <= datlen) {
+		/* We can't remove the last chain with data from src unless we
+		 * remove all chains, in which case we would have taken the
+		 * if block above. */
+ EVUTIL_ASSERT(chain != *src->last_with_datap);
+ nread += chain->off;
+ datlen -= chain->off;
+ previous = chain;
+ if (src->last_with_datap == &chain->next)
+ src->last_with_datap = &src->first;
+ chain = chain->next;
+ }
+
+ if (nread) {
+ /* we can remove the chain */
+ struct evbuffer_chain **chp;
+ chp = evbuffer_free_trailing_empty_chains(dst);
+
+ if (dst->first == NULL) {
+ dst->first = src->first;
+ } else {
+ *chp = src->first;
+ }
+ dst->last = previous;
+ previous->next = NULL;
+ src->first = chain;
+ advance_last_with_data(dst);
+
+ dst->total_len += nread;
+ dst->n_add_for_cb += nread;
+ }
+
+ /* we know that there is more data in the src buffer than
+ * we want to read, so we manually drain the chain */
+ evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
+ chain->misalign += datlen;
+ chain->off -= datlen;
+ nread += datlen;
+
+ /* You might think we would want to increment dst->n_add_for_cb
+ * here too. But evbuffer_add above already took care of that.
+ */
+ src->total_len -= nread;
+ src->n_del_for_cb += nread;
+
+ if (nread) {
+ evbuffer_invoke_callbacks_(dst);
+ evbuffer_invoke_callbacks_(src);
+ }
+ result = (int)nread;/*XXXX should change return type */
+
+done:
+ EVBUFFER_UNLOCK2(src, dst);
+ return result;
+}
+
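+/* Linearize the first 'size' bytes of 'buf' into a single contiguous chain
+ * and return a pointer to them.  Returns NULL if that many bytes are not
+ * available, if a pinned chain prevents the copy, or on allocation failure.
+ * A negative 'size' linearizes the whole buffer. */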
+unsigned char *
+evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
+{
+ struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
+ unsigned char *buffer, *result = NULL;
+ ev_ssize_t remaining;
+ int removed_last_with_data = 0;
+ int removed_last_with_datap = 0;
+
+ EVBUFFER_LOCK(buf);
+
+ chain = buf->first;
+
+ if (size < 0)
+ size = buf->total_len;
+ /* if size > buf->total_len, we cannot guarantee to the user that she
+ * is going to have a long enough buffer afterwards; so we return
+ * NULL */
+ if (size == 0 || (size_t)size > buf->total_len)
+ goto done;
+
+ /* No need to pull up anything; the first size bytes are
+ * already here. */
+ if (chain->off >= (size_t)size) {
+ result = chain->buffer + chain->misalign;
+ goto done;
+ }
+
+ /* Make sure that none of the chains we need to copy from is pinned. */
+ remaining = size - chain->off;
+ EVUTIL_ASSERT(remaining >= 0);
+ for (tmp=chain->next; tmp; tmp=tmp->next) {
+ if (CHAIN_PINNED(tmp))
+ goto done;
+ if (tmp->off >= (size_t)remaining)
+ break;
+ remaining -= tmp->off;
+ }
+
+ if (CHAIN_PINNED(chain)) {
+ size_t old_off = chain->off;
+ if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
+ /* not enough room at end of chunk. */
+ goto done;
+ }
+ buffer = CHAIN_SPACE_PTR(chain);
+ tmp = chain;
+ tmp->off = size;
+ size -= old_off;
+ chain = chain->next;
+ } else if (chain->buffer_len - chain->misalign >= (size_t)size) {
+ /* already have enough space in the first chain */
+ size_t old_off = chain->off;
+ buffer = chain->buffer + chain->misalign + chain->off;
+ tmp = chain;
+ tmp->off = size;
+ size -= old_off;
+ chain = chain->next;
+ } else {
+ if ((tmp = evbuffer_chain_new(size)) == NULL) {
+ event_warn("%s: out of memory", __func__);
+ goto done;
+ }
+ buffer = tmp->buffer;
+ tmp->off = size;
+ buf->first = tmp;
+ }
+
+ /* TODO(niels): deal with buffers that point to NULL like sendfile */
+
+ /* Copy and free every chunk that will be entirely pulled into tmp */
+ last_with_data = *buf->last_with_datap;
+ for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
+ next = chain->next;
+
+ memcpy(buffer, chain->buffer + chain->misalign, chain->off);
+ size -= chain->off;
+ buffer += chain->off;
+ if (chain == last_with_data)
+ removed_last_with_data = 1;
+ if (&chain->next == buf->last_with_datap)
+ removed_last_with_datap = 1;
+
+ evbuffer_chain_free(chain);
+ }
+
+ if (chain != NULL) {
+ memcpy(buffer, chain->buffer + chain->misalign, size);
+ chain->misalign += size;
+ chain->off -= size;
+ } else {
+ buf->last = tmp;
+ }
+
+ tmp->next = chain;
+
+ if (removed_last_with_data) {
+ buf->last_with_datap = &buf->first;
+ } else if (removed_last_with_datap) {
+ if (buf->first->next && buf->first->next->off)
+ buf->last_with_datap = &buf->first->next;
+ else
+ buf->last_with_datap = &buf->first;
+ }
+
+ result = (tmp->buffer + tmp->misalign);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/*
+ * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
+ * The returned buffer must be freed by the caller.
+ */
+char *
+evbuffer_readline(struct evbuffer *buffer)
+{
+ return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
+}
+
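+/* Search forward from 'it' for the byte 'chr', walking the chain list with
+ * memchr.  On a match, 'it' is updated to point at it and its absolute
+ * position in the buffer is returned; otherwise -1 is returned. */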
+static inline ev_ssize_t
+evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
+ while (chain != NULL) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ char *cp = memchr(buffer+i, chr, chain->off-i);
+ if (cp) {
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
+ it->pos += (cp - buffer - i);
+ return it->pos;
+ }
+ it->pos += chain->off - i;
+ i = 0;
+ chain = chain->next;
+ }
+
+ return (-1);
+}
+
+static inline char *
+find_eol_char(char *s, size_t len)
+{
+#define CHUNK_SZ 128
+ /* Lots of benchmarking found this approach to be faster in practice
+ * than doing two memchrs over the whole buffer, doing a memchr on each
+ * char of the buffer, or trying to emulate memchr by hand. */
+ char *s_end, *cr, *lf;
+ s_end = s+len;
+ while (s < s_end) {
+ size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
+ cr = memchr(s, '\r', chunk);
+ lf = memchr(s, '\n', chunk);
+ if (cr) {
+ if (lf && lf < cr)
+ return lf;
+ return cr;
+ } else if (lf) {
+ return lf;
+ }
+ s += CHUNK_SZ;
+ }
+
+ return NULL;
+#undef CHUNK_SZ
+}
+
+static ev_ssize_t
+evbuffer_find_eol_char(struct evbuffer_ptr *it)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
+ while (chain != NULL) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ char *cp = find_eol_char(buffer+i, chain->off-i);
+ if (cp) {
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
+ it->pos += (cp - buffer) - i;
+ return it->pos;
+ }
+ it->pos += chain->off - i;
+ i = 0;
+ chain = chain->next;
+ }
+
+ return (-1);
+}
+
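+/* Count how many consecutive bytes starting at 'ptr' are drawn from
+ * 'chrset', advancing 'ptr' past them.  Used to measure how long a "\r\n"
+ * run is when draining an EVBUFFER_EOL_ANY line terminator. */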
+static inline int
+evbuffer_strspn(
+ struct evbuffer_ptr *ptr, const char *chrset)
+{
+ int count = 0;
+ struct evbuffer_chain *chain = ptr->internal_.chain;
+ size_t i = ptr->internal_.pos_in_chain;
+
+ if (!chain)
+ return 0;
+
+ while (1) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ for (; i < chain->off; ++i) {
+ const char *p = chrset;
+ while (*p) {
+ if (buffer[i] == *p++)
+ goto next;
+ }
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
+ ptr->pos += count;
+ return count;
+ next:
+ ++count;
+ }
+ i = 0;
+
+ if (! chain->next) {
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
+ ptr->pos += count;
+ return count;
+ }
+
+ chain = chain->next;
+ }
+}
+
+
+static inline int
+evbuffer_getchr(struct evbuffer_ptr *it)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t off = it->internal_.pos_in_chain;
+
+ if (chain == NULL)
+ return -1;
+
+ return (unsigned char)chain->buffer[chain->misalign + off];
+}
+
+struct evbuffer_ptr
+evbuffer_search_eol(struct evbuffer *buffer,
+ struct evbuffer_ptr *start, size_t *eol_len_out,
+ enum evbuffer_eol_style eol_style)
+{
+ struct evbuffer_ptr it, it2;
+ size_t extra_drain = 0;
+ int ok = 0;
+
+ /* Avoid locking in trivial edge cases */
+ if (start && start->internal_.chain == NULL) {
+ PTR_NOT_FOUND(&it);
+ if (eol_len_out)
+ *eol_len_out = extra_drain;
+ return it;
+ }
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start) {
+ memcpy(&it, start, sizeof(it));
+ } else {
+ it.pos = 0;
+ it.internal_.chain = buffer->first;
+ it.internal_.pos_in_chain = 0;
+ }
+
+ /* the eol_style determines our first stop character and how many
+ * characters we are going to drain afterwards. */
+ switch (eol_style) {
+ case EVBUFFER_EOL_ANY:
+ if (evbuffer_find_eol_char(&it) < 0)
+ goto done;
+ memcpy(&it2, &it, sizeof(it));
+ extra_drain = evbuffer_strspn(&it2, "\r\n");
+ break;
+ case EVBUFFER_EOL_CRLF_STRICT: {
+ it = evbuffer_search(buffer, "\r\n", 2, &it);
+ if (it.pos < 0)
+ goto done;
+ extra_drain = 2;
+ break;
+ }
+ case EVBUFFER_EOL_CRLF: {
+ ev_ssize_t start_pos = it.pos;
+ /* Look for a LF ... */
+ if (evbuffer_strchr(&it, '\n') < 0)
+ goto done;
+ extra_drain = 1;
+	/* ... optionally preceded by a CR. */
+ if (it.pos == start_pos)
+ break; /* If the first character is \n, don't back up */
+ /* This potentially does an extra linear walk over the first
+ * few chains. Probably, that's not too expensive unless you
+ * have a really pathological setup. */
+ memcpy(&it2, &it, sizeof(it));
+ if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
+ break;
+ if (evbuffer_getchr(&it2) == '\r') {
+ memcpy(&it, &it2, sizeof(it));
+ extra_drain = 2;
+ }
+ break;
+ }
+ case EVBUFFER_EOL_LF:
+ if (evbuffer_strchr(&it, '\n') < 0)
+ goto done;
+ extra_drain = 1;
+ break;
+ case EVBUFFER_EOL_NUL:
+ if (evbuffer_strchr(&it, '\0') < 0)
+ goto done;
+ extra_drain = 1;
+ break;
+ default:
+ goto done;
+ }
+
+ ok = 1;
+done:
+ EVBUFFER_UNLOCK(buffer);
+
+ if (!ok)
+ PTR_NOT_FOUND(&it);
+ if (eol_len_out)
+ *eol_len_out = extra_drain;
+
+ return it;
+}
+
+char *
+evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
+ enum evbuffer_eol_style eol_style)
+{
+ struct evbuffer_ptr it;
+ char *line;
+ size_t n_to_copy=0, extra_drain=0;
+ char *result = NULL;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (buffer->freeze_start) {
+ goto done;
+ }
+
+ it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
+ if (it.pos < 0)
+ goto done;
+ n_to_copy = it.pos;
+
+ if ((line = mm_malloc(n_to_copy+1)) == NULL) {
+ event_warn("%s: out of memory", __func__);
+ goto done;
+ }
+
+ evbuffer_remove(buffer, line, n_to_copy);
+ line[n_to_copy] = '\0';
+
+ evbuffer_drain(buffer, extra_drain);
+ result = line;
+done:
+ EVBUFFER_UNLOCK(buffer);
+
+ if (n_read_out)
+ *n_read_out = result ? n_to_copy : 0;
+
+ return result;
+}
+
+#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
+
+/* Adds data to an event buffer */
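+/* Strategy: first try to copy into the free space of the last chain, then
+ * try to reclaim that chain's misalignment, and only then allocate a fresh
+ * chain, sized at roughly double the previous chain (up to a cap) or the
+ * data length, whichever is larger. */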
+
+int
+evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
+{
+ struct evbuffer_chain *chain, *tmp;
+ const unsigned char *data = data_in;
+ size_t remain, to_alloc;
+ int result = -1;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ goto done;
+ }
+ /* Prevent buf->total_len overflow */
+ if (datlen > EV_SIZE_MAX - buf->total_len) {
+ goto done;
+ }
+
+ chain = buf->last;
+
+ /* If there are no chains allocated for this buffer, allocate one
+ * big enough to hold all the data. */
+ if (chain == NULL) {
+ chain = evbuffer_chain_new(datlen);
+ if (!chain)
+ goto done;
+ evbuffer_chain_insert(buf, chain);
+ }
+
+ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
+ /* Always true for mutable buffers */
+ EVUTIL_ASSERT(chain->misalign >= 0 &&
+ (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
+ remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
+ if (remain >= datlen) {
+ /* there's enough space to hold all the data in the
+ * current last chain */
+ memcpy(chain->buffer + chain->misalign + chain->off,
+ data, datlen);
+ chain->off += datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ } else if (!CHAIN_PINNED(chain) &&
+ evbuffer_chain_should_realign(chain, datlen)) {
+ /* we can fit the data into the misalignment */
+ evbuffer_chain_align(chain);
+
+ memcpy(chain->buffer + chain->off, data, datlen);
+ chain->off += datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ }
+ } else {
+ /* we cannot write any data to the last chain */
+ remain = 0;
+ }
+
+ /* we need to add another chain */
+ to_alloc = chain->buffer_len;
+ if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
+ to_alloc <<= 1;
+ if (datlen > to_alloc)
+ to_alloc = datlen;
+ tmp = evbuffer_chain_new(to_alloc);
+ if (tmp == NULL)
+ goto done;
+
+ if (remain) {
+ memcpy(chain->buffer + chain->misalign + chain->off,
+ data, remain);
+ chain->off += remain;
+ buf->total_len += remain;
+ buf->n_add_for_cb += remain;
+ }
+
+ data += remain;
+ datlen -= remain;
+
+ memcpy(tmp->buffer, data, datlen);
+ tmp->off = datlen;
+ evbuffer_chain_insert(buf, tmp);
+ buf->n_add_for_cb += datlen;
+
+out:
+ evbuffer_invoke_callbacks_(buf);
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+int
+evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
+{
+ struct evbuffer_chain *chain, *tmp;
+ int result = -1;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_start) {
+ goto done;
+ }
+ if (datlen > EV_SIZE_MAX - buf->total_len) {
+ goto done;
+ }
+
+ chain = buf->first;
+
+ if (chain == NULL) {
+ chain = evbuffer_chain_new(datlen);
+ if (!chain)
+ goto done;
+ evbuffer_chain_insert(buf, chain);
+ }
+
+ /* we cannot touch immutable buffers */
+ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
+ /* Always true for mutable buffers */
+ EVUTIL_ASSERT(chain->misalign >= 0 &&
+ (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
+
+ /* If this chain is empty, we can treat it as
+ * 'empty at the beginning' rather than 'empty at the end' */
+ if (chain->off == 0)
+ chain->misalign = chain->buffer_len;
+
+ if ((size_t)chain->misalign >= datlen) {
+ /* we have enough space to fit everything */
+ memcpy(chain->buffer + chain->misalign - datlen,
+ data, datlen);
+ chain->off += datlen;
+ chain->misalign -= datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ } else if (chain->misalign) {
+ /* we can only fit some of the data. */
+ memcpy(chain->buffer,
+ (char*)data + datlen - chain->misalign,
+ (size_t)chain->misalign);
+ chain->off += (size_t)chain->misalign;
+ buf->total_len += (size_t)chain->misalign;
+ buf->n_add_for_cb += (size_t)chain->misalign;
+ datlen -= (size_t)chain->misalign;
+ chain->misalign = 0;
+ }
+ }
+
+ /* we need to add another chain */
+ if ((tmp = evbuffer_chain_new(datlen)) == NULL)
+ goto done;
+ buf->first = tmp;
+ if (buf->last_with_datap == &buf->first)
+ buf->last_with_datap = &tmp->next;
+
+ tmp->next = chain;
+
+ tmp->off = datlen;
+ EVUTIL_ASSERT(datlen <= tmp->buffer_len);
+ tmp->misalign = tmp->buffer_len - datlen;
+
+ memcpy(tmp->buffer + tmp->misalign, data, datlen);
+ buf->total_len += datlen;
+	buf->n_add_for_cb += datlen;
+
+out:
+ evbuffer_invoke_callbacks_(buf);
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
+static void
+evbuffer_chain_align(struct evbuffer_chain *chain)
+{
+ EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
+ EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
+ memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
+ chain->misalign = 0;
+}
+
+#define MAX_TO_COPY_IN_EXPAND 4096
+#define MAX_TO_REALIGN_IN_EXPAND 2048
+
+/** Helper: return true iff we should realign chain to fit datalen bytes of
+ data in it. */
+static int
+evbuffer_chain_should_realign(struct evbuffer_chain *chain,
+ size_t datlen)
+{
+ return chain->buffer_len - chain->off >= datlen &&
+ (chain->off < chain->buffer_len / 2) &&
+ (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
+}
+
+/* Expands the available space in the event buffer to at least datlen, all in
+ * a single chunk. Return that chunk. */
+static struct evbuffer_chain *
+evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain, **chainp;
+ struct evbuffer_chain *result = NULL;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ chainp = buf->last_with_datap;
+
+ /* XXX If *chainp is no longer writeable, but has enough space in its
+ * misalign, this might be a bad idea: we could still use *chainp, not
+ * (*chainp)->next. */
+ if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
+ chainp = &(*chainp)->next;
+
+	/* 'chain' now points to the first chain with writable space (if any).
+ * We will either use it, realign it, replace it, or resize it. */
+ chain = *chainp;
+
+ if (chain == NULL ||
+ (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
+ /* We can't use the last_with_data chain at all. Just add a
+ * new one that's big enough. */
+ goto insert_new;
+ }
+
+ /* If we can fit all the data, then we don't have to do anything */
+ if (CHAIN_SPACE_LEN(chain) >= datlen) {
+ result = chain;
+ goto ok;
+ }
+
+ /* If the chain is completely empty, just replace it by adding a new
+ * empty chain. */
+ if (chain->off == 0) {
+ goto insert_new;
+ }
+
+ /* If the misalignment plus the remaining space fulfills our data
+ * needs, we could just force an alignment to happen. Afterwards, we
+ * have enough space. But only do this if we're saving a lot of space
+ * and not moving too much data. Otherwise the space savings are
+ * probably offset by the time lost in copying.
+ */
+ if (evbuffer_chain_should_realign(chain, datlen)) {
+ evbuffer_chain_align(chain);
+ result = chain;
+ goto ok;
+ }
+
+	/* At this point, we can either resize the last chunk with space in
+	 * it, use the next chunk after it, or add a new chunk.  If we add a
+	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former last
+	 * chunk.  If we resize, we have to copy chain->off bytes.
+	 */
+
+ /* Would expanding this chunk be affordable and worthwhile? */
+	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
+	    chain->off > MAX_TO_COPY_IN_EXPAND ||
+	    datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) {
+ /* It's not worth resizing this chain. Can the next one be
+ * used? */
+ if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
+			/* Yes, we can just use the next chain (which should
+			 * be empty). */
+ result = chain->next;
+ goto ok;
+ } else {
+ /* No; append a new chain (which will free all
+ * terminal empty chains.) */
+ goto insert_new;
+ }
+ } else {
+ /* Okay, we're going to try to resize this chain: Not doing so
+ * would waste at least 1/8 of its current allocation, and we
+ * can do so without having to copy more than
+ * MAX_TO_COPY_IN_EXPAND bytes. */
+ /* figure out how much space we need */
+ size_t length = chain->off + datlen;
+ struct evbuffer_chain *tmp = evbuffer_chain_new(length);
+ if (tmp == NULL)
+ goto err;
+
+ /* copy the data over that we had so far */
+ tmp->off = chain->off;
+ memcpy(tmp->buffer, chain->buffer + chain->misalign,
+ chain->off);
+ /* fix up the list */
+ EVUTIL_ASSERT(*chainp == chain);
+ result = *chainp = tmp;
+
+ if (buf->last == chain)
+ buf->last = tmp;
+
+ tmp->next = chain->next;
+ evbuffer_chain_free(chain);
+ goto ok;
+ }
+
+insert_new:
+ result = evbuffer_chain_insert_new(buf, datlen);
+ if (!result)
+ goto err;
+ok:
+ EVUTIL_ASSERT(result);
+ EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
+err:
+ return result;
+}
+
+/* Make sure that datlen bytes are available for writing in the last n
+ * chains. Never copies or moves data. */
+int
+evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
+{
+ struct evbuffer_chain *chain = buf->last, *tmp, *next;
+ size_t avail;
+ int used;
+
+ ASSERT_EVBUFFER_LOCKED(buf);
+ EVUTIL_ASSERT(n >= 2);
+
+ if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
+ /* There is no last chunk, or we can't touch the last chunk.
+ * Just add a new chunk. */
+ chain = evbuffer_chain_new(datlen);
+ if (chain == NULL)
+ return (-1);
+
+ evbuffer_chain_insert(buf, chain);
+ return (0);
+ }
+
+ used = 0; /* number of chains we're using space in. */
+ avail = 0; /* how much space they have. */
+ /* How many bytes can we stick at the end of buffer as it is? Iterate
+	 * over the chains at the end of the buffer, trying to see how much
+ * space we have in the first n. */
+ for (chain = *buf->last_with_datap; chain; chain = chain->next) {
+ if (chain->off) {
+ size_t space = (size_t) CHAIN_SPACE_LEN(chain);
+ EVUTIL_ASSERT(chain == *buf->last_with_datap);
+ if (space) {
+ avail += space;
+ ++used;
+ }
+ } else {
+ /* No data in chain; realign it. */
+ chain->misalign = 0;
+ avail += chain->buffer_len;
+ ++used;
+ }
+ if (avail >= datlen) {
+ /* There is already enough space. Just return */
+ return (0);
+ }
+ if (used == n)
+ break;
+ }
+
+ /* There wasn't enough space in the first n chains with space in
+ * them. Either add a new chain with enough space, or replace all
+ * empty chains with one that has enough space, depending on n. */
+ if (used < n) {
+ /* The loop ran off the end of the chains before it hit n
+ * chains; we can add another. */
+ EVUTIL_ASSERT(chain == NULL);
+
+ tmp = evbuffer_chain_new(datlen - avail);
+ if (tmp == NULL)
+ return (-1);
+
+ buf->last->next = tmp;
+ buf->last = tmp;
+ /* (we would only set last_with_data if we added the first
+ * chain. But if the buffer had no chains, we would have
+ * just allocated a new chain earlier) */
+ return (0);
+ } else {
+ /* Nuke _all_ the empty chains. */
+ int rmv_all = 0; /* True iff we removed last_with_data. */
+ chain = *buf->last_with_datap;
+ if (!chain->off) {
+ EVUTIL_ASSERT(chain == buf->first);
+ rmv_all = 1;
+ avail = 0;
+ } else {
+ /* can't overflow, since only mutable chains have
+ * huge misaligns. */
+ avail = (size_t) CHAIN_SPACE_LEN(chain);
+ chain = chain->next;
+ }
+
+
+ for (; chain; chain = next) {
+ next = chain->next;
+ EVUTIL_ASSERT(chain->off == 0);
+ evbuffer_chain_free(chain);
+ }
+ EVUTIL_ASSERT(datlen >= avail);
+ tmp = evbuffer_chain_new(datlen - avail);
+ if (tmp == NULL) {
+ if (rmv_all) {
+ ZERO_CHAIN(buf);
+ } else {
+ buf->last = *buf->last_with_datap;
+ (*buf->last_with_datap)->next = NULL;
+ }
+ return (-1);
+ }
+
+ if (rmv_all) {
+ buf->first = buf->last = tmp;
+ buf->last_with_datap = &buf->first;
+ } else {
+ (*buf->last_with_datap)->next = tmp;
+ buf->last = tmp;
+ }
+ return (0);
+ }
+}
+
+int
+evbuffer_expand(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain;
+
+ EVBUFFER_LOCK(buf);
+ chain = evbuffer_expand_singlechain(buf, datlen);
+ EVBUFFER_UNLOCK(buf);
+ return chain ? 0 : -1;
+}
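+
+/* A minimal usage sketch of the public API above (kept out of the build with
+ * #if 0), assuming a caller that knows roughly how much it is about to
+ * append; 'payload' and 'payload_len' are hypothetical names. */
+#if 0
+static int
+append_with_preallocation(struct evbuffer *buf,
+    const void *payload, size_t payload_len)
+{
+	/* Reserve the space up front so the add below does not have to grow
+	 * the chain list as it goes. */
+	if (evbuffer_expand(buf, payload_len) == -1)
+		return -1;
+	return evbuffer_add(buf, payload, payload_len);
+}
+#endif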
+
+/*
+ * Reads data from a file descriptor into a buffer.
+ */
+
+#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
+#define USE_IOVEC_IMPL
+#endif
+
+#ifdef USE_IOVEC_IMPL
+
+#ifdef EVENT__HAVE_SYS_UIO_H
+/* Number of iovecs we use for writev; fragmentation determines how much
+ * we end up writing. */
+
+#define DEFAULT_WRITE_IOVEC 128
+
+#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
+#define NUM_WRITE_IOVEC UIO_MAXIOV
+#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
+#define NUM_WRITE_IOVEC IOV_MAX
+#else
+#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
+#endif
+
+#define IOV_TYPE struct iovec
+#define IOV_PTR_FIELD iov_base
+#define IOV_LEN_FIELD iov_len
+#define IOV_LEN_TYPE size_t
+#else
+#define NUM_WRITE_IOVEC 16
+#define IOV_TYPE WSABUF
+#define IOV_PTR_FIELD buf
+#define IOV_LEN_FIELD len
+#define IOV_LEN_TYPE unsigned long
+#endif
+#endif
+#define NUM_READ_IOVEC 4
+
+#define EVBUFFER_MAX_READ 4096
+
+/** Helper function to figure out which space to use for reading data into
+ an evbuffer. Internal use only.
+
+ @param buf The buffer to read into
+ @param howmuch How much we want to read.
+ @param vecs An array of two or more iovecs or WSABUFs.
+ @param n_vecs_avail The length of vecs
+ @param chainp A pointer to a variable to hold the first chain we're
+ reading into.
+ @param exact Boolean: if true, we do not provide more than 'howmuch'
+ space in the vectors, even if more space is available.
+ @return The number of buffers we're using.
+ */
+int
+evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
+ struct evbuffer_iovec *vecs, int n_vecs_avail,
+ struct evbuffer_chain ***chainp, int exact)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain **firstchainp;
+ size_t so_far;
+ int i;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (howmuch < 0)
+ return -1;
+
+ so_far = 0;
+ /* Let firstchain be the first chain with any space on it */
+ firstchainp = buf->last_with_datap;
+ if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
+ firstchainp = &(*firstchainp)->next;
+ }
+
+ chain = *firstchainp;
+ for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
+ size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
+ if (avail > (howmuch - so_far) && exact)
+ avail = howmuch - so_far;
+ vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
+ vecs[i].iov_len = avail;
+ so_far += avail;
+ chain = chain->next;
+ }
+
+ *chainp = firstchainp;
+ return i;
+}
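+
+/* The public counterpart of the helper above is the
+ * evbuffer_reserve_space()/evbuffer_commit_space() pair.  A minimal sketch
+ * (not built; the generate_data() helper is hypothetical): */
+#if 0
+static int
+fill_without_copy(struct evbuffer *buf, size_t want)
+{
+	struct evbuffer_iovec v[2];
+	int n, i;
+	n = evbuffer_reserve_space(buf, want, v, 2);
+	if (n <= 0)
+		return -1;
+	for (i = 0; i < n && want > 0; ++i) {
+		size_t len = v[i].iov_len < want ? v[i].iov_len : want;
+		generate_data(v[i].iov_base, len);	/* write into the chain directly */
+		v[i].iov_len = len;	/* commit only what was actually written */
+		want -= len;
+	}
+	return evbuffer_commit_space(buf, v, i);
+}
+#endif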
+
+static int
+get_n_bytes_readable_on_socket(evutil_socket_t fd)
+{
+#if defined(FIONREAD) && defined(_WIN32)
+ unsigned long lng = EVBUFFER_MAX_READ;
+ if (ioctlsocket(fd, FIONREAD, &lng) < 0)
+ return -1;
+ /* Can overflow, but mostly harmlessly. XXXX */
+ return (int)lng;
+#elif defined(FIONREAD)
+ int n = EVBUFFER_MAX_READ;
+ if (ioctl(fd, FIONREAD, &n) < 0)
+ return -1;
+ return n;
+#else
+ return EVBUFFER_MAX_READ;
+#endif
+}
+
+/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
+ * as howmuch? */
+int
+evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
+{
+ struct evbuffer_chain **chainp;
+ int n;
+ int result;
+
+#ifdef USE_IOVEC_IMPL
+ int nvecs, i, remaining;
+#else
+ struct evbuffer_chain *chain;
+ unsigned char *p;
+#endif
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ result = -1;
+ goto done;
+ }
+
+ n = get_n_bytes_readable_on_socket(fd);
+ if (n <= 0 || n > EVBUFFER_MAX_READ)
+ n = EVBUFFER_MAX_READ;
+ if (howmuch < 0 || howmuch > n)
+ howmuch = n;
+
+#ifdef USE_IOVEC_IMPL
+ /* Since we can use iovecs, we're willing to use the last
+ * NUM_READ_IOVEC chains. */
+ if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
+ result = -1;
+ goto done;
+ } else {
+ IOV_TYPE vecs[NUM_READ_IOVEC];
+#ifdef EVBUFFER_IOVEC_IS_NATIVE_
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
+ NUM_READ_IOVEC, &chainp, 1);
+#else
+ /* We aren't using the native struct iovec. Therefore,
+ we are on win32. */
+ struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
+ &chainp, 1);
+
+ for (i=0; i < nvecs; ++i)
+ WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
+#endif
+
+#ifdef _WIN32
+ {
+ DWORD bytesRead;
+ DWORD flags=0;
+ if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
+ /* The read failed. It might be a close,
+ * or it might be an error. */
+ if (WSAGetLastError() == WSAECONNABORTED)
+ n = 0;
+ else
+ n = -1;
+ } else
+ n = bytesRead;
+ }
+#else
+ n = readv(fd, vecs, nvecs);
+#endif
+ }
+
+#else /*!USE_IOVEC_IMPL*/
+ /* If we don't have FIONREAD, we might waste some space here */
+ /* XXX we _will_ waste some space here if there is any space left
+ * over on buf->last. */
+ if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
+ result = -1;
+ goto done;
+ }
+
+ /* We can append new data at this point */
+ p = chain->buffer + chain->misalign + chain->off;
+
+#ifndef _WIN32
+ n = read(fd, p, howmuch);
+#else
+ n = recv(fd, p, howmuch, 0);
+#endif
+#endif /* USE_IOVEC_IMPL */
+
+ if (n == -1) {
+ result = -1;
+ goto done;
+ }
+ if (n == 0) {
+ result = 0;
+ goto done;
+ }
+
+#ifdef USE_IOVEC_IMPL
+ remaining = n;
+ for (i=0; i < nvecs; ++i) {
+ /* can't overflow, since only mutable chains have
+ * huge misaligns. */
+ size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
+ /* XXXX This is a kludge that can waste space in perverse
+ * situations. */
+ if (space > EVBUFFER_CHAIN_MAX)
+ space = EVBUFFER_CHAIN_MAX;
+ if ((ev_ssize_t)space < remaining) {
+ (*chainp)->off += space;
+ remaining -= (int)space;
+ } else {
+ (*chainp)->off += remaining;
+ buf->last_with_datap = chainp;
+ break;
+ }
+ chainp = &(*chainp)->next;
+ }
+#else
+ chain->off += n;
+ advance_last_with_data(buf);
+#endif
+ buf->total_len += n;
+ buf->n_add_for_cb += n;
+
+ /* Tell someone about changes in this buffer */
+ evbuffer_invoke_callbacks_(buf);
+ result = n;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+#ifdef USE_IOVEC_IMPL
+static inline int
+evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
+ ev_ssize_t howmuch)
+{
+ IOV_TYPE iov[NUM_WRITE_IOVEC];
+ struct evbuffer_chain *chain = buffer->first;
+ int n, i = 0;
+
+ if (howmuch < 0)
+ return -1;
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+ /* XXX make this top out at some maximal data length? if the
+ * buffer has (say) 1MB in it, split over 128 chains, there's
+ * no way it all gets written in one go. */
+ while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
+#ifdef USE_SENDFILE
+ /* we cannot write the file info via writev */
+ if (chain->flags & EVBUFFER_SENDFILE)
+ break;
+#endif
+ iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
+ if ((size_t)howmuch >= chain->off) {
+			/* XXX could be problematic when windows supports mmap */
+ iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
+ howmuch -= chain->off;
+ } else {
+			/* XXX could be problematic when windows supports mmap */
+ iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
+ break;
+ }
+ chain = chain->next;
+ }
+ if (! i)
+ return 0;
+
+#ifdef _WIN32
+ {
+ DWORD bytesSent;
+ if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
+ n = -1;
+ else
+ n = bytesSent;
+ }
+#else
+ n = writev(fd, iov, i);
+#endif
+ return (n);
+}
+#endif
+
+#ifdef USE_SENDFILE
+static inline int
+evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
+ ev_ssize_t howmuch)
+{
+ struct evbuffer_chain *chain = buffer->first;
+ struct evbuffer_chain_file_segment *info =
+ EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
+ chain);
+ const int source_fd = info->segment->fd;
+#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
+ int res;
+ ev_off_t len = chain->off;
+#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
+ ev_ssize_t res;
+ ev_off_t offset = chain->misalign;
+#endif
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+#if defined(SENDFILE_IS_MACOSX)
+ res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
+ if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
+ return (-1);
+
+ return (len);
+#elif defined(SENDFILE_IS_FREEBSD)
+ res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
+ if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
+ return (-1);
+
+ return (len);
+#elif defined(SENDFILE_IS_LINUX)
+ /* TODO(niels): implement splice */
+ res = sendfile(dest_fd, source_fd, &offset, chain->off);
+ if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
+ /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
+ return (0);
+ }
+ return (res);
+#elif defined(SENDFILE_IS_SOLARIS)
+ {
+ const off_t offset_orig = offset;
+ res = sendfile(dest_fd, source_fd, &offset, chain->off);
+ if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
+ if (offset - offset_orig)
+ return offset - offset_orig;
+ /* if this is EAGAIN or EINTR and no bytes were
+ * written, return 0 */
+ return (0);
+ }
+ return (res);
+ }
+#endif
+}
+#endif
+
+int
+evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
+ ev_ssize_t howmuch)
+{
+ int n = -1;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (buffer->freeze_start) {
+ goto done;
+ }
+
+ if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
+ howmuch = buffer->total_len;
+
+ if (howmuch > 0) {
+#ifdef USE_SENDFILE
+ struct evbuffer_chain *chain = buffer->first;
+ if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
+ n = evbuffer_write_sendfile(buffer, fd, howmuch);
+ else {
+#endif
+#ifdef USE_IOVEC_IMPL
+ n = evbuffer_write_iovec(buffer, fd, howmuch);
+#elif defined(_WIN32)
+ /* XXX(nickm) Don't disable this code until we know if
+ * the WSARecv code above works. */
+ void *p = evbuffer_pullup(buffer, howmuch);
+ EVUTIL_ASSERT(p || !howmuch);
+ n = send(fd, p, howmuch, 0);
+#else
+ void *p = evbuffer_pullup(buffer, howmuch);
+ EVUTIL_ASSERT(p || !howmuch);
+ n = write(fd, p, howmuch);
+#endif
+#ifdef USE_SENDFILE
+ }
+#endif
+ }
+
+ if (n > 0)
+ evbuffer_drain(buffer, n);
+
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return (n);
+}
+
+int
+evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
+{
+ return evbuffer_write_atmost(buffer, fd, -1);
+}
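+
+/* A minimal sketch of draining a buffer with evbuffer_write() (not built),
+ * assuming 'fd' is a connected *blocking* socket; a non-blocking caller
+ * would instead wait for EV_WRITE and retry after short writes. */
+#if 0
+static int
+flush_all(struct evbuffer *buf, evutil_socket_t fd)
+{
+	while (evbuffer_get_length(buf) > 0) {
+		if (evbuffer_write(buf, fd) <= 0)
+			return -1;	/* error or connection closed */
+	}
+	return 0;
+}
+#endif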
+
+unsigned char *
+evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
+{
+ unsigned char *search;
+ struct evbuffer_ptr ptr;
+
+ EVBUFFER_LOCK(buffer);
+
+ ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
+ if (ptr.pos < 0) {
+ search = NULL;
+ } else {
+ search = evbuffer_pullup(buffer, ptr.pos + len);
+ if (search)
+ search += ptr.pos;
+ }
+ EVBUFFER_UNLOCK(buffer);
+ return search;
+}
+
+/* Subtract <b>howfar</b> from the position of <b>pos</b> within
+ * <b>buf</b>. Returns 0 on success, -1 on failure.
+ *
+ * This isn't exposed yet, because of potential inefficiency issues.
+ * Maybe it should be. */
+static int
+evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t howfar)
+{
+ if (pos->pos < 0)
+ return -1;
+ if (howfar > (size_t)pos->pos)
+ return -1;
+ if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
+ pos->internal_.pos_in_chain -= howfar;
+ pos->pos -= howfar;
+ return 0;
+ } else {
+ const size_t newpos = pos->pos - howfar;
+ /* Here's the inefficient part: it walks over the
+ * chains until we hit newpos. */
+ return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
+ }
+}
+
+int
+evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t position, enum evbuffer_ptr_how how)
+{
+ size_t left = position;
+ struct evbuffer_chain *chain = NULL;
+ int result = 0;
+
+ EVBUFFER_LOCK(buf);
+
+ switch (how) {
+ case EVBUFFER_PTR_SET:
+ chain = buf->first;
+ pos->pos = position;
+ position = 0;
+ break;
+ case EVBUFFER_PTR_ADD:
+ /* this avoids iterating over all previous chains if
+ we just want to advance the position */
+ if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
+ EVBUFFER_UNLOCK(buf);
+ return -1;
+ }
+ chain = pos->internal_.chain;
+ pos->pos += position;
+ position = pos->internal_.pos_in_chain;
+ break;
+ }
+
+ EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
+ while (chain && position + left >= chain->off) {
+ left -= chain->off - position;
+ chain = chain->next;
+ position = 0;
+ }
+ if (chain) {
+ pos->internal_.chain = chain;
+ pos->internal_.pos_in_chain = position + left;
+ } else if (left == 0) {
+ /* The first byte in the (nonexistent) chain after the last chain */
+ pos->internal_.chain = NULL;
+ pos->internal_.pos_in_chain = 0;
+ } else {
+ PTR_NOT_FOUND(pos);
+ result = -1;
+ }
+
+ EVBUFFER_UNLOCK(buf);
+
+ return result;
+}
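+
+/* A minimal sketch of how callers typically drive evbuffer_ptr_set() (not
+ * built): position a pointer once with EVBUFFER_PTR_SET, then step it
+ * forward with EVBUFFER_PTR_ADD (step > 0) without rescanning earlier
+ * chains. */
+#if 0
+static void
+walk_in_steps(struct evbuffer *buf, size_t start, size_t step)
+{
+	struct evbuffer_ptr p;
+	if (evbuffer_ptr_set(buf, &p, start, EVBUFFER_PTR_SET) < 0)
+		return;
+	while (evbuffer_ptr_set(buf, &p, step, EVBUFFER_PTR_ADD) == 0) {
+		/* p.pos now holds the absolute offset within the buffer. */
+	}
+}
+#endif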
+
+/**
+ Compare the bytes in buf at position pos to the len bytes in mem. Return
+   less than 0, 0, or greater than 0, as memcmp does.
+ */
+static int
+evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
+ const char *mem, size_t len)
+{
+ struct evbuffer_chain *chain;
+ size_t position;
+ int r;
+
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (pos->pos < 0 ||
+ EV_SIZE_MAX - len < (size_t)pos->pos ||
+ pos->pos + len > buf->total_len)
+ return -1;
+
+ chain = pos->internal_.chain;
+ position = pos->internal_.pos_in_chain;
+ while (len && chain) {
+ size_t n_comparable;
+ if (len + position > chain->off)
+ n_comparable = chain->off - position;
+ else
+ n_comparable = len;
+ r = memcmp(chain->buffer + chain->misalign + position, mem,
+ n_comparable);
+ if (r)
+ return r;
+ mem += n_comparable;
+ len -= n_comparable;
+ position = 0;
+ chain = chain->next;
+ }
+
+ return 0;
+}
+
+struct evbuffer_ptr
+evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
+{
+ return evbuffer_search_range(buffer, what, len, start, NULL);
+}
+
+struct evbuffer_ptr
+evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
+{
+ struct evbuffer_ptr pos;
+ struct evbuffer_chain *chain, *last_chain = NULL;
+ const unsigned char *p;
+ char first;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start) {
+ memcpy(&pos, start, sizeof(pos));
+ chain = pos.internal_.chain;
+ } else {
+ pos.pos = 0;
+ chain = pos.internal_.chain = buffer->first;
+ pos.internal_.pos_in_chain = 0;
+ }
+
+ if (end)
+ last_chain = end->internal_.chain;
+
+ if (!len || len > EV_SSIZE_MAX)
+ goto done;
+
+ first = what[0];
+
+ while (chain) {
+ const unsigned char *start_at =
+ chain->buffer + chain->misalign +
+ pos.internal_.pos_in_chain;
+ p = memchr(start_at, first,
+ chain->off - pos.internal_.pos_in_chain);
+ if (p) {
+ pos.pos += p - start_at;
+ pos.internal_.pos_in_chain += p - start_at;
+ if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
+ if (end && pos.pos + (ev_ssize_t)len > end->pos)
+ goto not_found;
+ else
+ goto done;
+ }
+ ++pos.pos;
+ ++pos.internal_.pos_in_chain;
+ if (pos.internal_.pos_in_chain == chain->off) {
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
+ }
+ } else {
+ if (chain == last_chain)
+ goto not_found;
+ pos.pos += chain->off - pos.internal_.pos_in_chain;
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
+ }
+ }
+
+not_found:
+ PTR_NOT_FOUND(&pos);
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return pos;
+}
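+
+/* A minimal sketch of scanning a buffer for every occurrence of a delimiter
+ * with the public search API above (not built): */
+#if 0
+static int
+count_occurrences(struct evbuffer *buf, const char *needle, size_t len)
+{
+	int count = 0;
+	struct evbuffer_ptr p = evbuffer_search(buf, needle, len, NULL);
+	while (p.pos != -1) {
+		++count;
+		/* step past this match before searching again */
+		if (evbuffer_ptr_set(buf, &p, 1, EVBUFFER_PTR_ADD) < 0)
+			break;
+		p = evbuffer_search(buf, needle, len, &p);
+	}
+	return count;
+}
+#endif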
+
+int
+evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
+ struct evbuffer_ptr *start_at,
+ struct evbuffer_iovec *vec, int n_vec)
+{
+ struct evbuffer_chain *chain;
+ int idx = 0;
+ ev_ssize_t len_so_far = 0;
+
+ /* Avoid locking in trivial edge cases */
+ if (start_at && start_at->internal_.chain == NULL)
+ return 0;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start_at) {
+ chain = start_at->internal_.chain;
+ len_so_far = chain->off
+ - start_at->internal_.pos_in_chain;
+ idx = 1;
+ if (n_vec > 0) {
+ vec[0].iov_base = chain->buffer + chain->misalign
+ + start_at->internal_.pos_in_chain;
+ vec[0].iov_len = len_so_far;
+ }
+ chain = chain->next;
+ } else {
+ chain = buffer->first;
+ }
+
+ if (n_vec == 0 && len < 0) {
+ /* If no vectors are provided and they asked for "everything",
+ * pretend they asked for the actual available amount. */
+ len = buffer->total_len;
+ if (start_at) {
+ len -= start_at->pos;
+ }
+ }
+
+ while (chain) {
+ if (len >= 0 && len_so_far >= len)
+ break;
+ if (idx<n_vec) {
+ vec[idx].iov_base = chain->buffer + chain->misalign;
+ vec[idx].iov_len = chain->off;
+ } else if (len<0) {
+ break;
+ }
+ ++idx;
+ len_so_far += chain->off;
+ chain = chain->next;
+ }
+
+ EVBUFFER_UNLOCK(buffer);
+
+ return idx;
+}
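+
+/* A minimal sketch of zero-copy inspection with evbuffer_peek() (not built):
+ * the data stays in the buffer; only pointers into the chains are handed
+ * back. */
+#if 0
+static size_t
+sum_first_bytes(struct evbuffer *buf, ev_ssize_t howmuch)
+{
+	struct evbuffer_iovec v[4];
+	size_t total = 0;
+	int i, n = evbuffer_peek(buf, howmuch, NULL, v, 4);
+	if (n > 4)
+		n = 4;	/* evbuffer_peek reports how many vectors it *wanted* */
+	for (i = 0; i < n; ++i)
+		total += v[i].iov_len;
+	return total;
+}
+#endif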
+
+
+int
+evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
+{
+ char *buffer;
+ size_t space;
+ int sz, result = -1;
+ va_list aq;
+ struct evbuffer_chain *chain;
+
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ goto done;
+ }
+
+ /* make sure that at least some space is available */
+ if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
+ goto done;
+
+ for (;;) {
+#if 0
+ size_t used = chain->misalign + chain->off;
+ buffer = (char *)chain->buffer + chain->misalign + chain->off;
+ EVUTIL_ASSERT(chain->buffer_len >= used);
+ space = chain->buffer_len - used;
+#endif
+ buffer = (char*) CHAIN_SPACE_PTR(chain);
+ space = (size_t) CHAIN_SPACE_LEN(chain);
+
+#ifndef va_copy
+#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
+#endif
+ va_copy(aq, ap);
+
+ sz = evutil_vsnprintf(buffer, space, fmt, aq);
+
+ va_end(aq);
+
+ if (sz < 0)
+ goto done;
+ if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
+ (size_t)sz >= EVBUFFER_CHAIN_MAX)
+ goto done;
+ if ((size_t)sz < space) {
+ chain->off += sz;
+ buf->total_len += sz;
+ buf->n_add_for_cb += sz;
+
+ advance_last_with_data(buf);
+ evbuffer_invoke_callbacks_(buf);
+ result = sz;
+ goto done;
+ }
+ if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
+ goto done;
+ }
+ /* NOTREACHED */
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+int
+evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
+{
+ int res = -1;
+ va_list ap;
+
+ va_start(ap, fmt);
+ res = evbuffer_add_vprintf(buf, fmt, ap);
+ va_end(ap);
+
+ return (res);
+}
+
+int
+evbuffer_add_reference(struct evbuffer *outbuf,
+ const void *data, size_t datlen,
+ evbuffer_ref_cleanup_cb cleanupfn, void *extra)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain_reference *info;
+ int result = -1;
+
+ chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
+ if (!chain)
+ return (-1);
+ chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
+ chain->buffer = (unsigned char *)data;
+ chain->buffer_len = datlen;
+ chain->off = datlen;
+
+ info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
+ info->cleanupfn = cleanupfn;
+ info->extra = extra;
+
+ EVBUFFER_LOCK(outbuf);
+ if (outbuf->freeze_end) {
+ /* don't call chain_free; we do not want to actually invoke
+ * the cleanup function */
+ mm_free(chain);
+ goto done;
+ }
+ evbuffer_chain_insert(outbuf, chain);
+ outbuf->n_add_for_cb += datlen;
+
+ evbuffer_invoke_callbacks_(outbuf);
+
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(outbuf);
+
+ return result;
+}
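+
+/* A minimal sketch of evbuffer_add_reference() (not built): the buffer keeps
+ * only a pointer to the caller's memory, and the cleanup callback runs once
+ * the referenced bytes are no longer needed.  'block' is caller-owned heap
+ * memory. */
+#if 0
+static void
+free_on_done(const void *data, size_t datalen, void *extra)
+{
+	free((void *)data);
+}
+
+static int
+add_heap_block(struct evbuffer *buf, void *block, size_t len)
+{
+	/* On failure the cleanup callback is not invoked, so free manually. */
+	if (evbuffer_add_reference(buf, block, len, free_on_done, NULL) < 0) {
+		free(block);
+		return -1;
+	}
+	return 0;
+}
+#endif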
+
+/* TODO(niels): we may want to automagically convert to mmap, in
+ * case evbuffer_remove() or evbuffer_pullup() are being used.
+ */
+struct evbuffer_file_segment *
+evbuffer_file_segment_new(
+ int fd, ev_off_t offset, ev_off_t length, unsigned flags)
+{
+ struct evbuffer_file_segment *seg =
+ mm_calloc(sizeof(struct evbuffer_file_segment), 1);
+ if (!seg)
+ return NULL;
+ seg->refcnt = 1;
+ seg->fd = fd;
+ seg->flags = flags;
+ seg->file_offset = offset;
+ seg->cleanup_cb = NULL;
+ seg->cleanup_cb_arg = NULL;
+#ifdef _WIN32
+#ifndef lseek
+#define lseek _lseeki64
+#endif
+#ifndef fstat
+#define fstat _fstat
+#endif
+#ifndef stat
+#define stat _stat
+#endif
+#endif
+ if (length == -1) {
+ struct stat st;
+ if (fstat(fd, &st) < 0)
+ goto err;
+ length = st.st_size;
+ }
+ seg->length = length;
+
+ if (offset < 0 || length < 0 ||
+ ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
+ (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
+ goto err;
+
+#if defined(USE_SENDFILE)
+ if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
+ seg->can_sendfile = 1;
+ goto done;
+ }
+#endif
+
+ if (evbuffer_file_segment_materialize(seg)<0)
+ goto err;
+
+#if defined(USE_SENDFILE)
+done:
+#endif
+ if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
+ EVTHREAD_ALLOC_LOCK(seg->lock, 0);
+ }
+ return seg;
+err:
+ mm_free(seg);
+ return NULL;
+}
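+
+/* A minimal sketch (not built) of why the segment API exists separately from
+ * evbuffer_add_file(): one segment can be added to several buffers (e.g.
+ * several client connections) without re-reading the file.  'outs' is a
+ * hypothetical array of output buffers. */
+#if 0
+static int
+broadcast_file(struct evbuffer **outs, int n_outs, int fd, ev_off_t len)
+{
+	int i, r = 0;
+	struct evbuffer_file_segment *seg =
+	    evbuffer_file_segment_new(fd, 0, len, 0);
+	if (!seg)
+		return -1;
+	for (i = 0; i < n_outs; ++i) {
+		if (evbuffer_add_file_segment(outs[i], seg, 0, len) < 0)
+			r = -1;
+	}
+	evbuffer_file_segment_free(seg);	/* drop the caller's reference */
+	return r;
+}
+#endif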
+
+#ifdef EVENT__HAVE_MMAP
+static long
+get_page_size(void)
+{
+#ifdef SC_PAGE_SIZE
+ return sysconf(SC_PAGE_SIZE);
+#elif defined(_SC_PAGE_SIZE)
+ return sysconf(_SC_PAGE_SIZE);
+#else
+ return 1;
+#endif
+}
+#endif
+
+/* DOCDOC */
+/* Requires lock */
+static int
+evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
+{
+ const unsigned flags = seg->flags;
+ const int fd = seg->fd;
+ const ev_off_t length = seg->length;
+ const ev_off_t offset = seg->file_offset;
+
+ if (seg->contents)
+ return 0; /* already materialized */
+
+#if defined(EVENT__HAVE_MMAP)
+ if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
+ off_t offset_rounded = 0, offset_leftover = 0;
+ void *mapped;
+ if (offset) {
+ /* mmap implementations don't generally like us
+			 * to have an offset that isn't a round multiple of
+			 * the page size. */
+ long page_size = get_page_size();
+ if (page_size == -1)
+ goto err;
+ offset_leftover = offset % page_size;
+ offset_rounded = offset - offset_leftover;
+ }
+ mapped = mmap(NULL, length + offset_leftover,
+ PROT_READ,
+#ifdef MAP_NOCACHE
+ MAP_NOCACHE | /* ??? */
+#endif
+#ifdef MAP_FILE
+ MAP_FILE |
+#endif
+ MAP_PRIVATE,
+ fd, offset_rounded);
+ if (mapped == MAP_FAILED) {
+ event_warn("%s: mmap(%d, %d, %zu) failed",
+ __func__, fd, 0, (size_t)(offset + length));
+ } else {
+ seg->mapping = mapped;
+ seg->contents = (char*)mapped+offset_leftover;
+ seg->mmap_offset = 0;
+ seg->is_mapping = 1;
+ goto done;
+ }
+ }
+#endif
+#ifdef _WIN32
+ if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
+ intptr_t h = _get_osfhandle(fd);
+ HANDLE m;
+ ev_uint64_t total_size = length+offset;
+ if ((HANDLE)h == INVALID_HANDLE_VALUE)
+ goto err;
+ m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
+ (total_size >> 32), total_size & 0xfffffffful,
+ NULL);
+ if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */
+ seg->mapping_handle = m;
+ seg->mmap_offset = offset;
+ seg->is_mapping = 1;
+ goto done;
+ }
+ }
+#endif
+ {
+ ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
+ ev_off_t read_so_far = 0;
+ char *mem;
+ int e;
+ ev_ssize_t n = 0;
+ if (!(mem = mm_malloc(length)))
+ goto err;
+ if (start_pos < 0) {
+ mm_free(mem);
+ goto err;
+ }
+ if (lseek(fd, offset, SEEK_SET) < 0) {
+ mm_free(mem);
+ goto err;
+ }
+ while (read_so_far < length) {
+ n = read(fd, mem+read_so_far, length-read_so_far);
+ if (n <= 0)
+ break;
+ read_so_far += n;
+ }
+
+ e = errno;
+ pos = lseek(fd, start_pos, SEEK_SET);
+ if (n < 0 || (n == 0 && length > read_so_far)) {
+ mm_free(mem);
+ errno = e;
+ goto err;
+ } else if (pos < 0) {
+ mm_free(mem);
+ goto err;
+ }
+
+ seg->contents = mem;
+ }
+
+done:
+ return 0;
+err:
+ return -1;
+}
+
+void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
+ evbuffer_file_segment_cleanup_cb cb, void* arg)
+{
+ EVUTIL_ASSERT(seg->refcnt > 0);
+ seg->cleanup_cb = cb;
+ seg->cleanup_cb_arg = arg;
+}
+
+void
+evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
+{
+ int refcnt;
+ EVLOCK_LOCK(seg->lock, 0);
+ refcnt = --seg->refcnt;
+ EVLOCK_UNLOCK(seg->lock, 0);
+ if (refcnt > 0)
+ return;
+ EVUTIL_ASSERT(refcnt == 0);
+
+ if (seg->is_mapping) {
+#ifdef _WIN32
+ CloseHandle(seg->mapping_handle);
+#elif defined (EVENT__HAVE_MMAP)
+ off_t offset_leftover;
+ offset_leftover = seg->file_offset % get_page_size();
+ if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
+ event_warn("%s: munmap failed", __func__);
+#endif
+ } else if (seg->contents) {
+ mm_free(seg->contents);
+ }
+
+ if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
+ close(seg->fd);
+ }
+
+ if (seg->cleanup_cb) {
+ (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
+ seg->flags, seg->cleanup_cb_arg);
+ seg->cleanup_cb = NULL;
+ seg->cleanup_cb_arg = NULL;
+ }
+
+ EVTHREAD_FREE_LOCK(seg->lock, 0);
+ mm_free(seg);
+}
+
+int
+evbuffer_add_file_segment(struct evbuffer *buf,
+ struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain_file_segment *extra;
+ int can_use_sendfile = 0;
+
+ EVBUFFER_LOCK(buf);
+ EVLOCK_LOCK(seg->lock, 0);
+ if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
+ can_use_sendfile = 1;
+ } else {
+ if (!seg->contents) {
+ if (evbuffer_file_segment_materialize(seg)<0) {
+ EVLOCK_UNLOCK(seg->lock, 0);
+ EVBUFFER_UNLOCK(buf);
+ return -1;
+ }
+ }
+ }
+ ++seg->refcnt;
+ EVLOCK_UNLOCK(seg->lock, 0);
+
+ if (buf->freeze_end)
+ goto err;
+
+ if (length < 0) {
+ if (offset > seg->length)
+ goto err;
+ length = seg->length - offset;
+ }
+
+ /* Can we actually add this? */
+ if (offset+length > seg->length)
+ goto err;
+
+ chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
+ if (!chain)
+ goto err;
+ extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);
+
+ chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
+ if (can_use_sendfile && seg->can_sendfile) {
+ chain->flags |= EVBUFFER_SENDFILE;
+ chain->misalign = seg->file_offset + offset;
+ chain->off = length;
+ chain->buffer_len = chain->misalign + length;
+ } else if (seg->is_mapping) {
+#ifdef _WIN32
+ ev_uint64_t total_offset = seg->mmap_offset+offset;
+ ev_uint64_t offset_rounded=0, offset_remaining=0;
+ LPVOID data;
+ if (total_offset) {
+ SYSTEM_INFO si;
+ memset(&si, 0, sizeof(si)); /* cargo cult */
+ GetSystemInfo(&si);
+ offset_remaining = total_offset % si.dwAllocationGranularity;
+ offset_rounded = total_offset - offset_remaining;
+ }
+ data = MapViewOfFile(
+ seg->mapping_handle,
+ FILE_MAP_READ,
+ offset_rounded >> 32,
+ offset_rounded & 0xfffffffful,
+ length + offset_remaining);
+ if (data == NULL) {
+ mm_free(chain);
+ goto err;
+ }
+ chain->buffer = (unsigned char*) data;
+ chain->buffer_len = length+offset_remaining;
+ chain->misalign = offset_remaining;
+ chain->off = length;
+#else
+ chain->buffer = (unsigned char*)(seg->contents + offset);
+ chain->buffer_len = length;
+ chain->off = length;
+#endif
+ } else {
+ chain->buffer = (unsigned char*)(seg->contents + offset);
+ chain->buffer_len = length;
+ chain->off = length;
+ }
+
+ extra->segment = seg;
+ buf->n_add_for_cb += length;
+ evbuffer_chain_insert(buf, chain);
+
+ evbuffer_invoke_callbacks_(buf);
+
+ EVBUFFER_UNLOCK(buf);
+
+ return 0;
+err:
+ EVBUFFER_UNLOCK(buf);
+ evbuffer_file_segment_free(seg); /* Lowers the refcount */
+ return -1;
+}
+
+int
+evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
+{
+ struct evbuffer_file_segment *seg;
+ unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
+ int r;
+
+ seg = evbuffer_file_segment_new(fd, offset, length, flags);
+ if (!seg)
+ return -1;
+ r = evbuffer_add_file_segment(buf, seg, 0, length);
+ if (r == 0)
+ evbuffer_file_segment_free(seg);
+ return r;
+}
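+
+/* A minimal sketch of evbuffer_add_file() (not built; uses POSIX open(), so
+ * <fcntl.h> would be needed), assuming 'path' names a readable file.  On
+ * success the fd belongs to the buffer (EVBUF_FS_CLOSE_ON_FREE above), so
+ * the caller must not close it. */
+#if 0
+static int
+queue_whole_file(struct evbuffer *out, const char *path)
+{
+	int fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return -1;
+	if (evbuffer_add_file(out, fd, 0, -1) < 0) {
+		close(fd);	/* not handed off; still ours to close */
+		return -1;
+	}
+	return 0;
+}
+#endif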
+
+void
+evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
+{
+ EVBUFFER_LOCK(buffer);
+
+ if (!LIST_EMPTY(&buffer->callbacks))
+ evbuffer_remove_all_callbacks(buffer);
+
+ if (cb) {
+ struct evbuffer_cb_entry *ent =
+ evbuffer_add_cb(buffer, NULL, cbarg);
+ ent->cb.cb_obsolete = cb;
+ ent->flags |= EVBUFFER_CB_OBSOLETE;
+ }
+ EVBUFFER_UNLOCK(buffer);
+}
+
+struct evbuffer_cb_entry *
+evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
+{
+ struct evbuffer_cb_entry *e;
+ if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
+ return NULL;
+ EVBUFFER_LOCK(buffer);
+ e->cb.cb_func = cb;
+ e->cbarg = cbarg;
+ e->flags = EVBUFFER_CB_ENABLED;
+ LIST_INSERT_HEAD(&buffer->callbacks, e, next);
+ EVBUFFER_UNLOCK(buffer);
+ return e;
+}
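+
+/* A minimal sketch of the modern callback interface (not built): the
+ * evbuffer_cb_info argument reports how much was added and drained since the
+ * last round of callbacks. */
+#if 0
+static void
+report_growth(struct evbuffer *buf, const struct evbuffer_cb_info *info,
+    void *arg)
+{
+	size_t *total_added = arg;
+	*total_added += info->n_added;
+}
+
+/* Registration: keep the returned entry if you plan to remove or toggle it.
+ *	struct evbuffer_cb_entry *ent =
+ *	    evbuffer_add_cb(buf, report_growth, &counter);
+ */
+#endif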
+
+int
+evbuffer_remove_cb_entry(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *ent)
+{
+ EVBUFFER_LOCK(buffer);
+ LIST_REMOVE(ent, next);
+ EVBUFFER_UNLOCK(buffer);
+ mm_free(ent);
+ return 0;
+}
+
+int
+evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
+{
+ struct evbuffer_cb_entry *cbent;
+ int result = -1;
+ EVBUFFER_LOCK(buffer);
+ LIST_FOREACH(cbent, &buffer->callbacks, next) {
+ if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
+ result = evbuffer_remove_cb_entry(buffer, cbent);
+ goto done;
+ }
+ }
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return result;
+}
+
+int
+evbuffer_cb_set_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags)
+{
+ /* the user isn't allowed to mess with these. */
+ flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
+ EVBUFFER_LOCK(buffer);
+ cb->flags |= flags;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_cb_clear_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags)
+{
+ /* the user isn't allowed to mess with these. */
+ flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
+ EVBUFFER_LOCK(buffer);
+ cb->flags &= ~flags;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_freeze(struct evbuffer *buffer, int start)
+{
+ EVBUFFER_LOCK(buffer);
+ if (start)
+ buffer->freeze_start = 1;
+ else
+ buffer->freeze_end = 1;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_unfreeze(struct evbuffer *buffer, int start)
+{
+ EVBUFFER_LOCK(buffer);
+ if (start)
+ buffer->freeze_start = 0;
+ else
+ buffer->freeze_end = 0;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+#if 0
+void
+evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
+{
+ if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
+ cb->size_before_suspend = evbuffer_get_length(buffer);
+ cb->flags |= EVBUFFER_CB_SUSPENDED;
+ }
+}
+
+void
+evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
+{
+ if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
+ unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
+ size_t sz = cb->size_before_suspend;
+ cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
+ EVBUFFER_CB_CALL_ON_UNSUSPEND);
+ cb->size_before_suspend = 0;
+ if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
+ cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
+ }
+ }
+}
+#endif
+
+int
+evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,
+ int max_cbs)
+{
+ int r = 0;
+ EVBUFFER_LOCK(buffer);
+ if (buffer->deferred_cbs) {
+ if (max_cbs < 1) {
+ r = -1;
+ goto done;
+ }
+ cbs[0] = &buffer->deferred;
+ r = 1;
+ }
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return r;
+}
diff --git a/libs/libevent/src/buffer_iocp.c b/libs/libevent/src/buffer_iocp.c
new file mode 100644
index 0000000000..2d76a90e77
--- /dev/null
+++ b/libs/libevent/src/buffer_iocp.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ @file buffer_iocp.c
+
+ This module implements overlapped read and write functions for evbuffer
+ objects on Windows.
+*/
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/util.h"
+#include "event2/thread.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#include "evbuffer-internal.h"
+#include "iocp-internal.h"
+#include "mm-internal.h"
+
+#include <winsock2.h>
+#include <windows.h>
+#include <stdio.h>
+
+#define MAX_WSABUFS 16
+
+/** An evbuffer that can handle overlapped IO. */
+struct evbuffer_overlapped {
+ struct evbuffer buffer;
+ /** The socket that we're doing overlapped IO on. */
+ evutil_socket_t fd;
+
+ /** pending I/O type */
+ unsigned read_in_progress : 1;
+ unsigned write_in_progress : 1;
+
+ /** The first pinned chain in the buffer. */
+ struct evbuffer_chain *first_pinned;
+
+ /** How many chains are pinned; how many of the fields in buffers
+ * are we using. */
+ int n_buffers;
+ WSABUF buffers[MAX_WSABUFS];
+};
+
+/** Given an evbuffer, return the corresponding evbuffer_overlapped
+ * structure, or NULL if the evbuffer isn't overlapped. */
+static inline struct evbuffer_overlapped *
+upcast_evbuffer(struct evbuffer *buf)
+{
+ if (!buf || !buf->is_overlapped)
+ return NULL;
+ return EVUTIL_UPCAST(buf, struct evbuffer_overlapped, buffer);
+}
+
+/** Unpin all the chains noted as pinned in 'eo'. */
+static void
+pin_release(struct evbuffer_overlapped *eo, unsigned flag)
+{
+ int i;
+ struct evbuffer_chain *next, *chain = eo->first_pinned;
+
+ for (i = 0; i < eo->n_buffers; ++i) {
+ EVUTIL_ASSERT(chain);
+ next = chain->next;
+ evbuffer_chain_unpin_(chain, flag);
+ chain = next;
+ }
+}
+
+void
+evbuffer_commit_read_(struct evbuffer *evbuf, ev_ssize_t nBytes)
+{
+ struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
+ struct evbuffer_chain **chainp;
+ size_t remaining, len;
+ unsigned i;
+
+ EVBUFFER_LOCK(evbuf);
+ EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress);
+ EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? */
+
+ evbuffer_unfreeze(evbuf, 0);
+
+ chainp = evbuf->last_with_datap;
+ if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R))
+ chainp = &(*chainp)->next;
+ remaining = nBytes;
+ for (i = 0; remaining > 0 && i < (unsigned)buf->n_buffers; ++i) {
+ EVUTIL_ASSERT(*chainp);
+ len = buf->buffers[i].len;
+ if (remaining < len)
+ len = remaining;
+ (*chainp)->off += len;
+ evbuf->last_with_datap = chainp;
+ remaining -= len;
+ chainp = &(*chainp)->next;
+ }
+
+ pin_release(buf, EVBUFFER_MEM_PINNED_R);
+
+ buf->read_in_progress = 0;
+
+ evbuf->total_len += nBytes;
+ evbuf->n_add_for_cb += nBytes;
+
+ evbuffer_invoke_callbacks_(evbuf);
+
+ evbuffer_decref_and_unlock_(evbuf);
+}
+
+void
+evbuffer_commit_write_(struct evbuffer *evbuf, ev_ssize_t nBytes)
+{
+ struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
+
+ EVBUFFER_LOCK(evbuf);
+ EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress);
+ evbuffer_unfreeze(evbuf, 1);
+ evbuffer_drain(evbuf, nBytes);
+ pin_release(buf,EVBUFFER_MEM_PINNED_W);
+ buf->write_in_progress = 0;
+ evbuffer_decref_and_unlock_(evbuf);
+}
+
+struct evbuffer *
+evbuffer_overlapped_new_(evutil_socket_t fd)
+{
+ struct evbuffer_overlapped *evo;
+
+ evo = mm_calloc(1, sizeof(struct evbuffer_overlapped));
+ if (!evo)
+ return NULL;
+
+ LIST_INIT(&evo->buffer.callbacks);
+ evo->buffer.refcnt = 1;
+ evo->buffer.last_with_datap = &evo->buffer.first;
+
+ evo->buffer.is_overlapped = 1;
+ evo->fd = fd;
+
+ return &evo->buffer;
+}
+
+int
+evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t at_most,
+ struct event_overlapped *ol)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ int r = -1;
+ int i;
+ struct evbuffer_chain *chain;
+ DWORD bytesSent;
+
+ if (!buf) {
+ /* No buffer, or it isn't overlapped */
+ return -1;
+ }
+
+ EVBUFFER_LOCK(buf);
+ EVUTIL_ASSERT(!buf_o->read_in_progress);
+ if (buf->freeze_start || buf_o->write_in_progress)
+ goto done;
+ if (!buf->total_len) {
+ /* Nothing to write */
+ r = 0;
+ goto done;
+ } else if (at_most < 0 || (size_t)at_most > buf->total_len) {
+ at_most = buf->total_len;
+ }
+ evbuffer_freeze(buf, 1);
+
+ buf_o->first_pinned = NULL;
+ buf_o->n_buffers = 0;
+ memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
+
+ chain = buf_o->first_pinned = buf->first;
+
+ for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
+ WSABUF *b = &buf_o->buffers[i];
+ b->buf = (char*)( chain->buffer + chain->misalign );
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W);
+
+ if ((size_t)at_most > chain->off) {
+ /* XXXX Cast is safe for now, since win32 has no
+			   mmapped chains.  But later, we need to have this
+ add more WSAbufs if chain->off is greater than
+ ULONG_MAX */
+ b->len = (unsigned long)chain->off;
+ at_most -= chain->off;
+ } else {
+ b->len = (unsigned long)at_most;
+ ++i;
+ break;
+ }
+ }
+
+ buf_o->n_buffers = i;
+ evbuffer_incref_(buf);
+ if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
+ &ol->overlapped, NULL)) {
+ int error = WSAGetLastError();
+ if (error != WSA_IO_PENDING) {
+ /* An actual error. */
+ pin_release(buf_o, EVBUFFER_MEM_PINNED_W);
+ evbuffer_unfreeze(buf, 1);
+ evbuffer_free(buf); /* decref */
+ goto done;
+ }
+ }
+
+ buf_o->write_in_progress = 1;
+ r = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return r;
+}
+
+int
+evbuffer_launch_read_(struct evbuffer *buf, size_t at_most,
+ struct event_overlapped *ol)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ int r = -1, i;
+ int nvecs;
+ int npin=0;
+ struct evbuffer_chain *chain=NULL, **chainp;
+ DWORD bytesRead;
+ DWORD flags = 0;
+ struct evbuffer_iovec vecs[MAX_WSABUFS];
+
+ if (!buf_o)
+ return -1;
+ EVBUFFER_LOCK(buf);
+ EVUTIL_ASSERT(!buf_o->write_in_progress);
+ if (buf->freeze_end || buf_o->read_in_progress)
+ goto done;
+
+ buf_o->first_pinned = NULL;
+ buf_o->n_buffers = 0;
+ memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
+
+ if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1)
+ goto done;
+ evbuffer_freeze(buf, 0);
+
+ nvecs = evbuffer_read_setup_vecs_(buf, at_most,
+ vecs, MAX_WSABUFS, &chainp, 1);
+ for (i=0;i<nvecs;++i) {
+ WSABUF_FROM_EVBUFFER_IOV(
+ &buf_o->buffers[i],
+ &vecs[i]);
+ }
+
+ buf_o->n_buffers = nvecs;
+ buf_o->first_pinned = chain = *chainp;
+
+ npin=0;
+ for ( ; chain; chain = chain->next) {
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R);
+ ++npin;
+ }
+ EVUTIL_ASSERT(npin == nvecs);
+
+ evbuffer_incref_(buf);
+ if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
+ &ol->overlapped, NULL)) {
+ int error = WSAGetLastError();
+ if (error != WSA_IO_PENDING) {
+ /* An actual error. */
+ pin_release(buf_o, EVBUFFER_MEM_PINNED_R);
+ evbuffer_unfreeze(buf, 0);
+ evbuffer_free(buf); /* decref */
+ goto done;
+ }
+ }
+
+ buf_o->read_in_progress = 1;
+ r = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return r;
+}
+
+evutil_socket_t
+evbuffer_overlapped_get_fd_(struct evbuffer *buf)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ return buf_o ? buf_o->fd : -1;
+}
+
+void
+evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ EVBUFFER_LOCK(buf);
+	/* XXX Is this right? Should it cancel current I/O operations? */
+ if (buf_o)
+ buf_o->fd = fd;
+ EVBUFFER_UNLOCK(buf);
+}
diff --git a/libs/libevent/src/bufferevent-internal.h b/libs/libevent/src/bufferevent-internal.h
new file mode 100644
index 0000000000..d9d9e66640
--- /dev/null
+++ b/libs/libevent/src/bufferevent-internal.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef BUFFEREVENT_INTERNAL_H_INCLUDED_
+#define BUFFEREVENT_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "event2/event_struct.h"
+#include "evconfig-private.h"
+#include "event2/util.h"
+#include "defer-internal.h"
+#include "evthread-internal.h"
+#include "event2/thread.h"
+#include "ratelim-internal.h"
+#include "event2/bufferevent_struct.h"
+
+#include "ipv6-internal.h"
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+/* These flags are reasons that we might be declining to actually enable
+ reading or writing on a bufferevent.
+ */
+
+/* On all bufferevents, for reading: used when we have read up to the
+ watermark value.
+
+ On a filtering bufferevent, for writing: used when the underlying
+ bufferevent's write buffer has been filled up to its watermark
+ value.
+*/
+#define BEV_SUSPEND_WM 0x01
+/* On a base bufferevent: when we have emptied our bandwidth bucket. */
+#define BEV_SUSPEND_BW 0x02
+/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
+#define BEV_SUSPEND_BW_GROUP 0x04
+/* On a socket bufferevent: can't do any operations while we're waiting for
+ * name lookup to finish. */
+#define BEV_SUSPEND_LOOKUP 0x08
+/* On a base bufferevent, for reading: used when a filter has choked this
+ * (underlying) bufferevent because it has stopped reading from it. */
+#define BEV_SUSPEND_FILT_READ 0x10
+
+typedef ev_uint16_t bufferevent_suspend_flags;
+
+struct bufferevent_rate_limit_group {
+ /** List of all members in the group */
+ LIST_HEAD(rlim_group_member_list, bufferevent_private) members;
+ /** Current limits for the group. */
+ struct ev_token_bucket rate_limit;
+ struct ev_token_bucket_cfg rate_limit_cfg;
+
+	/** True iff we don't want to read from any member of the group until
+ * the token bucket refills. */
+ unsigned read_suspended : 1;
+	/** True iff we don't want to write from any member of the group until
+ * the token bucket refills. */
+ unsigned write_suspended : 1;
+ /** True iff we were unable to suspend one of the bufferevents in the
+ * group for reading the last time we tried, and we should try
+ * again. */
+ unsigned pending_unsuspend_read : 1;
+ /** True iff we were unable to suspend one of the bufferevents in the
+ * group for writing the last time we tried, and we should try
+ * again. */
+ unsigned pending_unsuspend_write : 1;
+
+ /*@{*/
+ /** Total number of bytes read or written in this group since last
+ * reset. */
+ ev_uint64_t total_read;
+ ev_uint64_t total_written;
+ /*@}*/
+
+ /** The number of bufferevents in the group. */
+ int n_members;
+
+ /** The smallest number of bytes that any member of the group should
+ * be limited to read or write at a time. */
+ ev_ssize_t min_share;
+ ev_ssize_t configured_min_share;
+
+ /** Timeout event that goes off once a tick, when the bucket is ready
+ * to refill. */
+ struct event master_refill_event;
+
+ /** Seed for weak random number generator. Protected by 'lock' */
+ struct evutil_weakrand_state weakrand_seed;
+
+ /** Lock to protect the members of this group. This lock should nest
+ * within every bufferevent lock: if you are holding this lock, do
+ * not assume you can lock another bufferevent. */
+ void *lock;
+};
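+
+/* How this structure is reached from user code, as a minimal sketch (not
+ * built): a shared token-bucket config is turned into a group, and
+ * bufferevents join the group so they draw from the same buckets. */
+#if 0
+static struct bufferevent_rate_limit_group *
+make_shared_limit(struct event_base *base)
+{
+	/* 64 KiB/s read and write, refilled once per tick (default tick). */
+	struct ev_token_bucket_cfg *cfg =
+	    ev_token_bucket_cfg_new(65536, 65536, 65536, 65536, NULL);
+	if (!cfg)
+		return NULL;
+	/* The group stores a copy of the cfg (see rate_limit_cfg above). */
+	return bufferevent_rate_limit_group_new(base, cfg);
+	/* later, per connection:
+	 *	bufferevent_add_to_rate_limit_group(bev, group); */
+}
+#endif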
+
+/** Fields for rate-limiting a single bufferevent. */
+struct bufferevent_rate_limit {
+ /* Linked-list elements for storing this bufferevent_private in a
+ * group.
+ *
+ * Note that this field is supposed to be protected by the group
+ * lock */
+ LIST_ENTRY(bufferevent_private) next_in_group;
+ /** The rate-limiting group for this bufferevent, or NULL if it is
+ * only rate-limited on its own. */
+ struct bufferevent_rate_limit_group *group;
+
+ /* This bufferevent's current limits. */
+ struct ev_token_bucket limit;
+ /* Pointer to the rate-limit configuration for this bufferevent.
+ * Can be shared. XXX reference-count this? */
+ struct ev_token_bucket_cfg *cfg;
+
+	/* Timeout event used when one of this bufferevent's buckets is
+ * empty. */
+ struct event refill_bucket_event;
+};
+
+/** Parts of the bufferevent structure that are shared among all bufferevent
+ * types, but not exposed in bufferevent_struct.h. */
+struct bufferevent_private {
+ /** The underlying bufferevent structure. */
+ struct bufferevent bev;
+
+ /** Evbuffer callback to enforce watermarks on input. */
+ struct evbuffer_cb_entry *read_watermarks_cb;
+
+ /** If set, we should free the lock when we free the bufferevent. */
+ unsigned own_lock : 1;
+
+ /** Flag: set if we have deferred callbacks and a read callback is
+ * pending. */
+ unsigned readcb_pending : 1;
+ /** Flag: set if we have deferred callbacks and a write callback is
+ * pending. */
+ unsigned writecb_pending : 1;
+ /** Flag: set if we are currently busy connecting. */
+ unsigned connecting : 1;
+ /** Flag: set if a connect failed prematurely; this is a hack for
+ * getting around the bufferevent abstraction. */
+ unsigned connection_refused : 1;
+ /** Set to the events pending if we have deferred callbacks and
+ * an events callback is pending. */
+ short eventcb_pending;
+
+ /** If set, read is suspended until one or more conditions are over.
+ * The actual value here is a bitfield of those conditions; see the
+ * BEV_SUSPEND_* flags above. */
+ bufferevent_suspend_flags read_suspended;
+
+ /** If set, writing is suspended until one or more conditions are over.
+ * The actual value here is a bitfield of those conditions; see the
+ * BEV_SUSPEND_* flags above. */
+ bufferevent_suspend_flags write_suspended;
+
+ /** Set to the current socket errno if we have deferred callbacks and
+ * an events callback is pending. */
+ int errno_pending;
+
+ /** The DNS error code for bufferevent_socket_connect_hostname */
+ int dns_error;
+
+ /** Used to implement deferred callbacks */
+ struct event_callback deferred;
+
+ /** The options this bufferevent was constructed with */
+ enum bufferevent_options options;
+
+ /** Current reference count for this bufferevent. */
+ int refcnt;
+
+ /** Lock for this bufferevent. Shared by the inbuf and the outbuf.
+ * If NULL, locking is disabled. */
+ void *lock;
+
+ /** No matter how big our bucket gets, don't try to read more than this
+ * much in a single read operation. */
+ ev_ssize_t max_single_read;
+
+ /** No matter how big our bucket gets, don't try to write more than this
+ * much in a single write operation. */
+ ev_ssize_t max_single_write;
+
+ /** Rate-limiting information for this bufferevent */
+ struct bufferevent_rate_limit *rate_limiting;
+
+	/* Saved conn_addr, so that the IP address can be extracted from it.
+	 *
+	 * Some servers may reset/close the connection without waiting for the
+	 * client; in that case we can't extract the IP address even in
+	 * close_cb.  So we save it just after connecting to the remote
+	 * server, or after resolving (to avoid extra DNS requests while
+	 * retrying, since UDP is slow). */
+ union {
+ struct sockaddr_in6 in6;
+ struct sockaddr_in in;
+ } conn_address;
+};
+
+/** Possible operations for a control callback. */
+enum bufferevent_ctrl_op {
+ BEV_CTRL_SET_FD,
+ BEV_CTRL_GET_FD,
+ BEV_CTRL_GET_UNDERLYING,
+ BEV_CTRL_CANCEL_ALL
+};
+
+/** Possible data types for a control callback */
+union bufferevent_ctrl_data {
+ void *ptr;
+ evutil_socket_t fd;
+};
+
+/**
+ Implementation table for a bufferevent: holds function pointers and other
+ information to make the various bufferevent types work.
+*/
+struct bufferevent_ops {
+ /** The name of the bufferevent's type. */
+ const char *type;
+ /** At what offset into the implementation type will we find a
+ bufferevent structure?
+
+ Example: if the type is implemented as
+ struct bufferevent_x {
+ int extra_data;
+ struct bufferevent bev;
+ }
+ then mem_offset should be offsetof(struct bufferevent_x, bev)
+ */
+ off_t mem_offset;
+
+ /** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
+ not need to adjust the 'enabled' field. Returns 0 on success, -1
+ on failure.
+ */
+ int (*enable)(struct bufferevent *, short);
+
+ /** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
+ not need to adjust the 'enabled' field. Returns 0 on success, -1
+ on failure.
+ */
+ int (*disable)(struct bufferevent *, short);
+
+	/** Detaches the bufferevent from related data structures. Called as
+ * soon as its reference count reaches 0. */
+ void (*unlink)(struct bufferevent *);
+
+ /** Free any storage and deallocate any extra data or structures used
+ in this implementation. Called when the bufferevent is
+ finalized.
+ */
+ void (*destruct)(struct bufferevent *);
+
+ /** Called when the timeouts on the bufferevent have changed.*/
+ int (*adj_timeouts)(struct bufferevent *);
+
+ /** Called to flush data. */
+ int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
+
+ /** Called to access miscellaneous fields. */
+ int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+};
+
+extern const struct bufferevent_ops bufferevent_ops_socket;
+extern const struct bufferevent_ops bufferevent_ops_filter;
+extern const struct bufferevent_ops bufferevent_ops_pair;
+
+#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
+#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
+#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)
+
+#ifdef _WIN32
+extern const struct bufferevent_ops bufferevent_ops_async;
+#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
+#else
+#define BEV_IS_ASYNC(bevp) 0
+#endif
+
+/** Initialize the shared parts of a bufferevent. */
+int bufferevent_init_common_(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);
+
+/** For internal use: temporarily stop all reads on bufev, until the conditions
+ * in 'what' are over. */
+void bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+/** For internal use: clear the conditions 'what' on bufev, and re-enable
+ * reading if there are no conditions left. */
+void bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+
+/** For internal use: temporarily stop all writes on bufev, until the conditions
+ * in 'what' are over. */
+void bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+/** For internal use: clear the conditions 'what' on bufev, and re-enable
+ * writing if there are no conditions left. */
+void bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+
+#define bufferevent_wm_suspend_read(b) \
+ bufferevent_suspend_read_((b), BEV_SUSPEND_WM)
+#define bufferevent_wm_unsuspend_read(b) \
+ bufferevent_unsuspend_read_((b), BEV_SUSPEND_WM)
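+
+/*
+ * Illustrative sketch (editor's addition): suspension reasons accumulate as
+ * bits in read_suspended/write_suspended, so reading only resumes once every
+ * reason has been cleared.  Assuming BEV_SUSPEND_BW is the rate-limiting
+ * flag defined earlier in this header:
+ *
+ *   bufferevent_suspend_read_(bev, BEV_SUSPEND_WM);    // watermark reached
+ *   bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);    // bucket empty
+ *   bufferevent_unsuspend_read_(bev, BEV_SUSPEND_WM);  // still suspended
+ *   bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);  // resumes (if enabled)
+ */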
+
+/*
+ Disable a bufferevent. Equivalent to bufferevent_disable(), but
+ first resets 'connecting' flag to force EV_WRITE down for sure.
+
+ XXXX this method will go away in the future; try not to add new users.
+ See comment in evhttp_connection_reset_() for discussion.
+
+ @param bufev the bufferevent to be disabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_disable()
+ */
+int bufferevent_disable_hard_(struct bufferevent *bufev, short event);
+
+/** Internal: Set up locking on a bufferevent. If lock is set, use it.
+ * Otherwise, use a new lock. */
+int bufferevent_enable_locking_(struct bufferevent *bufev, void *lock);
+/** Internal: backwards-compat macro for the now-public function that
+ * increments the reference count on bufev. */
+#define bufferevent_incref_(bufev) bufferevent_incref(bufev)
+/** Internal: Lock bufev and increase its reference count. */
+void bufferevent_incref_and_lock_(struct bufferevent *bufev);
+/** Internal: backwards-compat macro for the now-public function that
+ * decrements the reference count on bufev. Returns 1 if it freed
+ * the bufferevent. */
+#define bufferevent_decref_(bufev) bufferevent_decref(bufev)
+
+/** Internal: Drop the reference count on bufev, freeing as necessary, and
+ * unlocking it otherwise. Returns 1 if it freed the bufferevent. */
+int bufferevent_decref_and_unlock_(struct bufferevent *bufev);
+
+/** Internal: If callbacks are deferred and we have a read callback, schedule
+ * a readcb. Otherwise just run the readcb. Ignores watermarks. */
+void bufferevent_run_readcb_(struct bufferevent *bufev, int options);
+/** Internal: If callbacks are deferred and we have a write callback, schedule
+ * a writecb. Otherwise just run the writecb. Ignores watermarks. */
+void bufferevent_run_writecb_(struct bufferevent *bufev, int options);
+/** Internal: If callbacks are deferred and we have an eventcb, schedule
+ * it to run with events "what". Otherwise just run the eventcb.
+ * See bufferevent_trigger_event for meaning of "options". */
+void bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options);
+
+/** Internal: Run or schedule (if deferred or options contain
+ * BEV_TRIG_DEFER_CALLBACKS) I/O callbacks specified in iotype.
+ * Must already hold the bufev lock. Honors watermarks unless
+ * BEV_TRIG_IGNORE_WATERMARKS is in options. */
+static inline void bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options);
+
+/* Making this inline since all of the common-case calls to this function in
+ * libevent use constant arguments. */
+static inline void
+bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options)
+{
+ if ((iotype & EV_READ) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
+ evbuffer_get_length(bufev->input) >= bufev->wm_read.low))
+ bufferevent_run_readcb_(bufev, options);
+ if ((iotype & EV_WRITE) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
+ evbuffer_get_length(bufev->output) <= bufev->wm_write.low))
+ bufferevent_run_writecb_(bufev, options);
+}
+
+/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
+ * which case add ev with no timeout. */
+int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
+
+/* =========
+ * These next functions implement generic timeouts for bufferevents whose
+ * ev_read and ev_write events are used only for timeout handling.
+ * ========= */
+/** Internal use: Set up the ev_read and ev_write callbacks so that
+ * the other "generic_timeout" functions will work on it. Call this from
+ * the constructor function. */
+void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
+/** Internal use: Add or delete the generic timeout events as appropriate.
+ * (If an event is enabled and a timeout is set, we add the event. Otherwise
+ * we delete it.) Call this from anything that changes the timeout values,
+ * that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
+int bufferevent_generic_adj_timeouts_(struct bufferevent *bev);
+int bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev);
+
+enum bufferevent_options bufferevent_get_options_(struct bufferevent *bev);
+
+const struct sockaddr*
+bufferevent_socket_get_conn_address_(struct bufferevent *bev);
+
+/** Internal use: We have just successfully read data into an inbuf, so
+ * reset the read timeout (if any). */
+#define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \
+ do { \
+ if (evutil_timerisset(&(bev)->timeout_read)) \
+ event_add(&(bev)->ev_read, &(bev)->timeout_read); \
+ } while (0)
+/** Internal use: We have just successfully written data from the outbuf, so
+ * reset the write timeout (if any). */
+#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \
+ do { \
+ if (evutil_timerisset(&(bev)->timeout_write)) \
+ event_add(&(bev)->ev_write, &(bev)->timeout_write); \
+ } while (0)
+#define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \
+ event_del(&(bev)->ev_read)
+#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \
+ event_del(&(bev)->ev_write)
+
+
+/** Internal: Given a bufferevent, return its corresponding
+ * bufferevent_private. */
+#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
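+
+/*
+ * Illustrative sketch (editor's addition): struct bufferevent_private embeds
+ * the public struct bufferevent as its 'bev' member, so BEV_UPCAST simply
+ * subtracts offsetof(struct bufferevent_private, bev) from the pointer:
+ *
+ *   struct bufferevent_private *p = BEV_UPCAST(bufev);
+ *   EVUTIL_ASSERT(&p->bev == bufev);
+ *
+ * This is only valid for bufferevents that were allocated as part of a
+ * bufferevent_private, which is true of every implementation in this tree.
+ */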
+
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+#define BEV_LOCK(b) EVUTIL_NIL_STMT_
+#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
+#else
+/** Internal: Grab the lock (if any) on a bufferevent */
+#define BEV_LOCK(b) do { \
+ struct bufferevent_private *locking = BEV_UPCAST(b); \
+ EVLOCK_LOCK(locking->lock, 0); \
+ } while (0)
+
+/** Internal: Release the lock (if any) on a bufferevent */
+#define BEV_UNLOCK(b) do { \
+ struct bufferevent_private *locking = BEV_UPCAST(b); \
+ EVLOCK_UNLOCK(locking->lock, 0); \
+ } while (0)
+#endif
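+
+/*
+ * Illustrative sketch (editor's addition): the usual pattern in the .c files
+ * is to bracket any access to shared bufferevent state with these macros;
+ * the underlying EVLOCK_LOCK/EVLOCK_UNLOCK calls take no action when the
+ * bufferevent has no lock.  The function name below is a placeholder:
+ *
+ *   static short
+ *   get_enabled_example(struct bufferevent *bev)
+ *   {
+ *       short r;
+ *       BEV_LOCK(bev);
+ *       r = bev->enabled;
+ *       BEV_UNLOCK(bev);
+ *       return r;
+ *   }
+ *
+ * Compare bufferevent_get_enabled() in bufferevent.c.
+ */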
+
+
+/* ==== For rate-limiting. */
+
+int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev,
+ ev_ssize_t bytes);
+int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev,
+ ev_ssize_t bytes);
+ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev);
+
+int bufferevent_ratelim_init_(struct bufferevent_private *bev);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* BUFFEREVENT_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/bufferevent.c b/libs/libevent/src/bufferevent.c
new file mode 100644
index 0000000000..59ae24f143
--- /dev/null
+++ b/libs/libevent/src/bufferevent.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#include <errno.h>
+
+#include "event2/util.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/event.h"
+#include "event-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "evbuffer-internal.h"
+#include "util-internal.h"
+
+static void bufferevent_cancel_all_(struct bufferevent *bev);
+static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_);
+
+void
+bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ if (!bufev_private->read_suspended)
+ bufev->be_ops->disable(bufev, EV_READ);
+ bufev_private->read_suspended |= what;
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ bufev_private->read_suspended &= ~what;
+ if (!bufev_private->read_suspended && (bufev->enabled & EV_READ))
+ bufev->be_ops->enable(bufev, EV_READ);
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ if (!bufev_private->write_suspended)
+ bufev->be_ops->disable(bufev, EV_WRITE);
+ bufev_private->write_suspended |= what;
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ bufev_private->write_suspended &= ~what;
+ if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE))
+ bufev->be_ops->enable(bufev, EV_WRITE);
+ BEV_UNLOCK(bufev);
+}
+
+
+/* Callback to implement watermarks on the input buffer. Only enabled
+ * if the watermark is set. */
+static void
+bufferevent_inbuf_wm_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bufev = arg;
+ size_t size;
+
+ size = evbuffer_get_length(buf);
+
+ if (size >= bufev->wm_read.high)
+ bufferevent_wm_suspend_read(bufev);
+ else
+ bufferevent_wm_unsuspend_read(bufev);
+}
+
+static void
+bufferevent_run_deferred_callbacks_locked(struct event_callback *cb, void *arg)
+{
+ struct bufferevent_private *bufev_private = arg;
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ BEV_LOCK(bufev);
+ if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
+ bufev->errorcb) {
+ /* The "connected" happened before any reads or writes, so
+ send it first. */
+ bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
+ bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
+ }
+ if (bufev_private->readcb_pending && bufev->readcb) {
+ bufev_private->readcb_pending = 0;
+ bufev->readcb(bufev, bufev->cbarg);
+ }
+ if (bufev_private->writecb_pending && bufev->writecb) {
+ bufev_private->writecb_pending = 0;
+ bufev->writecb(bufev, bufev->cbarg);
+ }
+ if (bufev_private->eventcb_pending && bufev->errorcb) {
+ short what = bufev_private->eventcb_pending;
+ int err = bufev_private->errno_pending;
+ bufev_private->eventcb_pending = 0;
+ bufev_private->errno_pending = 0;
+ EVUTIL_SET_SOCKET_ERROR(err);
+ bufev->errorcb(bufev, what, bufev->cbarg);
+ }
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+static void
+bufferevent_run_deferred_callbacks_unlocked(struct event_callback *cb, void *arg)
+{
+ struct bufferevent_private *bufev_private = arg;
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ BEV_LOCK(bufev);
+#define UNLOCKED(stmt) \
+ do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)
+
+ if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
+ bufev->errorcb) {
+ /* The "connected" happened before any reads or writes, so
+ send it first. */
+ bufferevent_event_cb errorcb = bufev->errorcb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
+ UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
+ }
+ if (bufev_private->readcb_pending && bufev->readcb) {
+ bufferevent_data_cb readcb = bufev->readcb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->readcb_pending = 0;
+ UNLOCKED(readcb(bufev, cbarg));
+ }
+ if (bufev_private->writecb_pending && bufev->writecb) {
+ bufferevent_data_cb writecb = bufev->writecb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->writecb_pending = 0;
+ UNLOCKED(writecb(bufev, cbarg));
+ }
+ if (bufev_private->eventcb_pending && bufev->errorcb) {
+ bufferevent_event_cb errorcb = bufev->errorcb;
+ void *cbarg = bufev->cbarg;
+ short what = bufev_private->eventcb_pending;
+ int err = bufev_private->errno_pending;
+ bufev_private->eventcb_pending = 0;
+ bufev_private->errno_pending = 0;
+ EVUTIL_SET_SOCKET_ERROR(err);
+ UNLOCKED(errorcb(bufev,what,cbarg));
+ }
+ bufferevent_decref_and_unlock_(bufev);
+#undef UNLOCKED
+}
+
+#define SCHEDULE_DEFERRED(bevp) \
+ do { \
+ if (event_deferred_cb_schedule_( \
+ (bevp)->bev.ev_base, \
+ &(bevp)->deferred)) \
+ bufferevent_incref_(&(bevp)->bev); \
+ } while (0)
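+
+/*
+ * Editor's note: SCHEDULE_DEFERRED takes a reference on the bufferevent only
+ * when event_deferred_cb_schedule_() reports that the callback was newly
+ * queued; the matching release is the bufferevent_decref_and_unlock_() call
+ * at the end of bufferevent_run_deferred_callbacks_locked()/_unlocked()
+ * above.  This keeps the bufferevent alive from the moment a callback
+ * becomes pending until the deferred callback has actually run.
+ */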
+
+
+void
+bufferevent_run_readcb_(struct bufferevent *bufev, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->readcb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->readcb_pending = 1;
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->readcb(bufev, bufev->cbarg);
+ }
+}
+
+void
+bufferevent_run_writecb_(struct bufferevent *bufev, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->writecb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->writecb_pending = 1;
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->writecb(bufev, bufev->cbarg);
+ }
+}
+
+#define BEV_TRIG_ALL_OPTS ( \
+ BEV_TRIG_IGNORE_WATERMARKS| \
+ BEV_TRIG_DEFER_CALLBACKS \
+ )
+
+void
+bufferevent_trigger(struct bufferevent *bufev, short iotype, int options)
+{
+ bufferevent_incref_and_lock_(bufev);
+ bufferevent_trigger_nolock_(bufev, iotype, options&BEV_TRIG_ALL_OPTS);
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->errorcb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->eventcb_pending |= what;
+ p->errno_pending = EVUTIL_SOCKET_ERROR();
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->errorcb(bufev, what, bufev->cbarg);
+ }
+}
+
+void
+bufferevent_trigger_event(struct bufferevent *bufev, short what, int options)
+{
+ bufferevent_incref_and_lock_(bufev);
+ bufferevent_run_eventcb_(bufev, what, options&BEV_TRIG_ALL_OPTS);
+ bufferevent_decref_and_unlock_(bufev);
+}
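+
+/*
+ * Illustrative sketch (editor's addition): bufferevent_trigger() and
+ * bufferevent_trigger_event() are the public entry points.  For example, an
+ * application can force its read callback to run even though the input
+ * buffer is below the low watermark:
+ *
+ *   bufferevent_trigger(bev, EV_READ, BEV_TRIG_IGNORE_WATERMARKS);
+ *
+ * or deliver a synthetic error to its event callback:
+ *
+ *   bufferevent_trigger_event(bev, BEV_EVENT_ERROR, 0);
+ */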
+
+int
+bufferevent_init_common_(struct bufferevent_private *bufev_private,
+ struct event_base *base,
+ const struct bufferevent_ops *ops,
+ enum bufferevent_options options)
+{
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ if (!bufev->input) {
+ if ((bufev->input = evbuffer_new()) == NULL)
+ return -1;
+ }
+
+ if (!bufev->output) {
+ if ((bufev->output = evbuffer_new()) == NULL) {
+ evbuffer_free(bufev->input);
+ return -1;
+ }
+ }
+
+ bufev_private->refcnt = 1;
+ bufev->ev_base = base;
+
+ /* Disable timeouts. */
+ evutil_timerclear(&bufev->timeout_read);
+ evutil_timerclear(&bufev->timeout_write);
+
+ bufev->be_ops = ops;
+
+ bufferevent_ratelim_init_(bufev_private);
+
+ /*
+	 * Set to EV_WRITE so that using bufferevent_write will trigger a
+	 * write callback. Reading needs to be enabled explicitly, because
+	 * otherwise no data will be available.
+ */
+ bufev->enabled = EV_WRITE;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (options & BEV_OPT_THREADSAFE) {
+ if (bufferevent_enable_locking_(bufev, NULL) < 0) {
+ /* cleanup */
+ evbuffer_free(bufev->input);
+ evbuffer_free(bufev->output);
+ bufev->input = NULL;
+ bufev->output = NULL;
+ return -1;
+ }
+ }
+#endif
+ if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
+ == BEV_OPT_UNLOCK_CALLBACKS) {
+ event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
+ return -1;
+ }
+ if (options & BEV_OPT_UNLOCK_CALLBACKS)
+ event_deferred_cb_init_(
+ &bufev_private->deferred,
+ event_base_get_npriorities(base) / 2,
+ bufferevent_run_deferred_callbacks_unlocked,
+ bufev_private);
+ else
+ event_deferred_cb_init_(
+ &bufev_private->deferred,
+ event_base_get_npriorities(base) / 2,
+ bufferevent_run_deferred_callbacks_locked,
+ bufev_private);
+
+ bufev_private->options = options;
+
+ evbuffer_set_parent_(bufev->input, bufev);
+ evbuffer_set_parent_(bufev->output, bufev);
+
+ return 0;
+}
+
+void
+bufferevent_setcb(struct bufferevent *bufev,
+ bufferevent_data_cb readcb, bufferevent_data_cb writecb,
+ bufferevent_event_cb eventcb, void *cbarg)
+{
+ BEV_LOCK(bufev);
+
+ bufev->readcb = readcb;
+ bufev->writecb = writecb;
+ bufev->errorcb = eventcb;
+
+ bufev->cbarg = cbarg;
+ BEV_UNLOCK(bufev);
+}
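+
+/*
+ * Illustrative sketch (editor's addition): a typical caller pairs
+ * bufferevent_setcb() with bufferevent_enable().  The callback names below
+ * are placeholders:
+ *
+ *   static void on_read(struct bufferevent *bev, void *ctx)
+ *   {
+ *       char buf[4096];
+ *       size_t n;
+ *       while ((n = bufferevent_read(bev, buf, sizeof(buf))) > 0)
+ *           bufferevent_write(bev, buf, n);   // echo the data back
+ *   }
+ *
+ *   static void on_event(struct bufferevent *bev, short what, void *ctx)
+ *   {
+ *       if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
+ *           bufferevent_free(bev);
+ *   }
+ *
+ *   bufferevent_setcb(bev, on_read, NULL, on_event, NULL);
+ *   bufferevent_enable(bev, EV_READ | EV_WRITE);
+ */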
+
+void
+bufferevent_getcb(struct bufferevent *bufev,
+ bufferevent_data_cb *readcb_ptr,
+ bufferevent_data_cb *writecb_ptr,
+ bufferevent_event_cb *eventcb_ptr,
+ void **cbarg_ptr)
+{
+ BEV_LOCK(bufev);
+ if (readcb_ptr)
+ *readcb_ptr = bufev->readcb;
+ if (writecb_ptr)
+ *writecb_ptr = bufev->writecb;
+ if (eventcb_ptr)
+ *eventcb_ptr = bufev->errorcb;
+ if (cbarg_ptr)
+ *cbarg_ptr = bufev->cbarg;
+
+ BEV_UNLOCK(bufev);
+}
+
+struct evbuffer *
+bufferevent_get_input(struct bufferevent *bufev)
+{
+ return bufev->input;
+}
+
+struct evbuffer *
+bufferevent_get_output(struct bufferevent *bufev)
+{
+ return bufev->output;
+}
+
+struct event_base *
+bufferevent_get_base(struct bufferevent *bufev)
+{
+ return bufev->ev_base;
+}
+
+int
+bufferevent_get_priority(const struct bufferevent *bufev)
+{
+ if (event_initialized(&bufev->ev_read)) {
+ return event_get_priority(&bufev->ev_read);
+ } else {
+ return event_base_get_npriorities(bufev->ev_base) / 2;
+ }
+}
+
+int
+bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
+{
+ if (evbuffer_add(bufev->output, data, size) == -1)
+ return (-1);
+
+ return 0;
+}
+
+int
+bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+ if (evbuffer_add_buffer(bufev->output, buf) == -1)
+ return (-1);
+
+ return 0;
+}
+
+size_t
+bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
+{
+ return (evbuffer_remove(bufev->input, data, size));
+}
+
+int
+bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+ return (evbuffer_add_buffer(buf, bufev->input));
+}
+
+int
+bufferevent_enable(struct bufferevent *bufev, short event)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ short impl_events = event;
+ int r = 0;
+
+ bufferevent_incref_and_lock_(bufev);
+ if (bufev_private->read_suspended)
+ impl_events &= ~EV_READ;
+ if (bufev_private->write_suspended)
+ impl_events &= ~EV_WRITE;
+
+ bufev->enabled |= event;
+
+ if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
+ r = -1;
+
+ bufferevent_decref_and_unlock_(bufev);
+ return r;
+}
+
+int
+bufferevent_set_timeouts(struct bufferevent *bufev,
+ const struct timeval *tv_read,
+ const struct timeval *tv_write)
+{
+ int r = 0;
+ BEV_LOCK(bufev);
+ if (tv_read) {
+ bufev->timeout_read = *tv_read;
+ } else {
+ evutil_timerclear(&bufev->timeout_read);
+ }
+ if (tv_write) {
+ bufev->timeout_write = *tv_write;
+ } else {
+ evutil_timerclear(&bufev->timeout_write);
+ }
+
+ if (bufev->be_ops->adj_timeouts)
+ r = bufev->be_ops->adj_timeouts(bufev);
+ BEV_UNLOCK(bufev);
+
+ return r;
+}
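+
+/*
+ * Illustrative sketch (editor's addition): timeouts are struct timeval
+ * values, and passing NULL clears the corresponding timeout.  For example,
+ * a 30-second read timeout with no write timeout:
+ *
+ *   struct timeval tv = { 30, 0 };
+ *   bufferevent_set_timeouts(bev, &tv, NULL);
+ */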
+
+
+/* Obsolete; use bufferevent_set_timeouts */
+void
+bufferevent_settimeout(struct bufferevent *bufev,
+ int timeout_read, int timeout_write)
+{
+ struct timeval tv_read, tv_write;
+ struct timeval *ptv_read = NULL, *ptv_write = NULL;
+
+ memset(&tv_read, 0, sizeof(tv_read));
+ memset(&tv_write, 0, sizeof(tv_write));
+
+ if (timeout_read) {
+ tv_read.tv_sec = timeout_read;
+ ptv_read = &tv_read;
+ }
+ if (timeout_write) {
+ tv_write.tv_sec = timeout_write;
+ ptv_write = &tv_write;
+ }
+
+ bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
+}
+
+
+int
+bufferevent_disable_hard_(struct bufferevent *bufev, short event)
+{
+ int r = 0;
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ bufev->enabled &= ~event;
+
+ bufev_private->connecting = 0;
+ if (bufev->be_ops->disable(bufev, event) < 0)
+ r = -1;
+
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+int
+bufferevent_disable(struct bufferevent *bufev, short event)
+{
+ int r = 0;
+
+ BEV_LOCK(bufev);
+ bufev->enabled &= ~event;
+
+ if (bufev->be_ops->disable(bufev, event) < 0)
+ r = -1;
+
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+/*
+ * Sets the watermarks
+ */
+
+void
+bufferevent_setwatermark(struct bufferevent *bufev, short events,
+ size_t lowmark, size_t highmark)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ if (events & EV_WRITE) {
+ bufev->wm_write.low = lowmark;
+ bufev->wm_write.high = highmark;
+ }
+
+ if (events & EV_READ) {
+ bufev->wm_read.low = lowmark;
+ bufev->wm_read.high = highmark;
+
+ if (highmark) {
+			/* There is now a new high-water mark for read.
+			   Enable the callback if needed, and see if we should
+			   suspend or unsuspend reading. */
+
+ if (bufev_private->read_watermarks_cb == NULL) {
+ bufev_private->read_watermarks_cb =
+ evbuffer_add_cb(bufev->input,
+ bufferevent_inbuf_wm_cb,
+ bufev);
+ }
+ evbuffer_cb_set_flags(bufev->input,
+ bufev_private->read_watermarks_cb,
+ EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);
+
+ if (evbuffer_get_length(bufev->input) >= highmark)
+ bufferevent_wm_suspend_read(bufev);
+ else if (evbuffer_get_length(bufev->input) < highmark)
+ bufferevent_wm_unsuspend_read(bufev);
+ } else {
+ /* There is now no high-water mark for read. */
+ if (bufev_private->read_watermarks_cb)
+ evbuffer_cb_clear_flags(bufev->input,
+ bufev_private->read_watermarks_cb,
+ EVBUFFER_CB_ENABLED);
+ bufferevent_wm_unsuspend_read(bufev);
+ }
+ }
+ BEV_UNLOCK(bufev);
+}
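+
+/*
+ * Illustrative sketch (editor's addition): for example, to have the read
+ * callback fire only once at least 128 bytes are buffered, and to stop
+ * reading from the transport once 64KB have accumulated:
+ *
+ *   bufferevent_setwatermark(bev, EV_READ, 128, 65536);
+ *
+ * Setting highmark to 0 removes the read high watermark again.
+ */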
+
+int
+bufferevent_getwatermark(struct bufferevent *bufev, short events,
+ size_t *lowmark, size_t *highmark)
+{
+ if (events == EV_WRITE) {
+ BEV_LOCK(bufev);
+ if (lowmark)
+ *lowmark = bufev->wm_write.low;
+ if (highmark)
+ *highmark = bufev->wm_write.high;
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+
+ if (events == EV_READ) {
+ BEV_LOCK(bufev);
+ if (lowmark)
+ *lowmark = bufev->wm_read.low;
+ if (highmark)
+ *highmark = bufev->wm_read.high;
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+ return -1;
+}
+
+int
+bufferevent_flush(struct bufferevent *bufev,
+ short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ int r = -1;
+ BEV_LOCK(bufev);
+ if (bufev->be_ops->flush)
+ r = bufev->be_ops->flush(bufev, iotype, mode);
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+void
+bufferevent_incref_and_lock_(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ BEV_UPCAST(bufev);
+ BEV_LOCK(bufev);
+ ++bufev_private->refcnt;
+}
+
+#if 0
+static void
+bufferevent_transfer_lock_ownership_(struct bufferevent *donor,
+ struct bufferevent *recipient)
+{
+ struct bufferevent_private *d = BEV_UPCAST(donor);
+ struct bufferevent_private *r = BEV_UPCAST(recipient);
+ if (d->lock != r->lock)
+ return;
+ if (r->own_lock)
+ return;
+ if (d->own_lock) {
+ d->own_lock = 0;
+ r->own_lock = 1;
+ }
+}
+#endif
+
+int
+bufferevent_decref_and_unlock_(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ int n_cbs = 0;
+#define MAX_CBS 16
+ struct event_callback *cbs[MAX_CBS];
+
+ EVUTIL_ASSERT(bufev_private->refcnt > 0);
+
+ if (--bufev_private->refcnt) {
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+
+ if (bufev->be_ops->unlink)
+ bufev->be_ops->unlink(bufev);
+
+ /* Okay, we're out of references. Let's finalize this once all the
+ * callbacks are done running. */
+ cbs[0] = &bufev->ev_read.ev_evcallback;
+ cbs[1] = &bufev->ev_write.ev_evcallback;
+ cbs[2] = &bufev_private->deferred;
+ n_cbs = 3;
+ if (bufev_private->rate_limiting) {
+ struct event *e = &bufev_private->rate_limiting->refill_bucket_event;
+ if (event_initialized(e))
+ cbs[n_cbs++] = &e->ev_evcallback;
+ }
+ n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs);
+ n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs);
+
+ event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs,
+ bufferevent_finalize_cb_);
+
+#undef MAX_CBS
+ BEV_UNLOCK(bufev);
+
+ return 1;
+}
+
+static void
+bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
+{
+ struct bufferevent *bufev = arg_;
+ struct bufferevent *underlying;
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ underlying = bufferevent_get_underlying(bufev);
+
+ /* Clean up the shared info */
+ if (bufev->be_ops->destruct)
+ bufev->be_ops->destruct(bufev);
+
+ /* XXX what happens if refcnt for these buffers is > 1?
+ * The buffers can share a lock with this bufferevent object,
+ * but the lock might be destroyed below. */
+ /* evbuffer will free the callbacks */
+ evbuffer_free(bufev->input);
+ evbuffer_free(bufev->output);
+
+ if (bufev_private->rate_limiting) {
+ if (bufev_private->rate_limiting->group)
+ bufferevent_remove_from_rate_limit_group_internal_(bufev,0);
+ mm_free(bufev_private->rate_limiting);
+ bufev_private->rate_limiting = NULL;
+ }
+
+
+ BEV_UNLOCK(bufev);
+
+ if (bufev_private->own_lock)
+ EVTHREAD_FREE_LOCK(bufev_private->lock,
+ EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ /* Free the actual allocated memory. */
+ mm_free(((char*)bufev) - bufev->be_ops->mem_offset);
+
+ /* Release the reference to underlying now that we no longer need the
+ * reference to it. We wait this long mainly in case our lock is
+ * shared with underlying.
+ *
+ * The 'destruct' function will also drop a reference to underlying
+ * if BEV_OPT_CLOSE_ON_FREE is set.
+ *
+ * XXX Should we/can we just refcount evbuffer/bufferevent locks?
+ * It would probably save us some headaches.
+ */
+ if (underlying)
+ bufferevent_decref_(underlying);
+}
+
+int
+bufferevent_decref(struct bufferevent *bufev)
+{
+ BEV_LOCK(bufev);
+ return bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_free(struct bufferevent *bufev)
+{
+ BEV_LOCK(bufev);
+ bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
+ bufferevent_cancel_all_(bufev);
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_incref(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ /* XXX: now that this function is public, we might want to
+ * - return the count from this function
+ * - create a new function to atomically grab the current refcount
+ */
+ BEV_LOCK(bufev);
+ ++bufev_private->refcnt;
+ BEV_UNLOCK(bufev);
+}
+
+int
+bufferevent_enable_locking_(struct bufferevent *bufev, void *lock)
+{
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+ return -1;
+#else
+ struct bufferevent *underlying;
+
+ if (BEV_UPCAST(bufev)->lock)
+ return -1;
+ underlying = bufferevent_get_underlying(bufev);
+
+ if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
+ lock = BEV_UPCAST(underlying)->lock;
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 0;
+ } else if (!lock) {
+ EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock)
+ return -1;
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 1;
+ } else {
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 0;
+ }
+ evbuffer_enable_locking(bufev->input, lock);
+ evbuffer_enable_locking(bufev->output, lock);
+
+ if (underlying && !BEV_UPCAST(underlying)->lock)
+ bufferevent_enable_locking_(underlying, lock);
+
+ return 0;
+#endif
+}
+
+int
+bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.fd = fd;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
+ BEV_UNLOCK(bev);
+ return res;
+}
+
+evutil_socket_t
+bufferevent_getfd(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.fd = -1;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
+ BEV_UNLOCK(bev);
+ return (res<0) ? -1 : d.fd;
+}
+
+enum bufferevent_options
+bufferevent_get_options_(struct bufferevent *bev)
+{
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ enum bufferevent_options options;
+
+ BEV_LOCK(bev);
+ options = bev_p->options;
+ BEV_UNLOCK(bev);
+ return options;
+}
+
+
+static void
+bufferevent_cancel_all_(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ memset(&d, 0, sizeof(d));
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
+ BEV_UNLOCK(bev);
+}
+
+short
+bufferevent_get_enabled(struct bufferevent *bufev)
+{
+ short r;
+ BEV_LOCK(bufev);
+ r = bufev->enabled;
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+struct bufferevent *
+bufferevent_get_underlying(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.ptr = NULL;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
+ BEV_UNLOCK(bev);
+ return (res<0) ? NULL : d.ptr;
+}
+
+static void
+bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
+{
+ struct bufferevent *bev = ctx;
+ bufferevent_incref_and_lock_(bev);
+ bufferevent_disable(bev, EV_READ);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
+ bufferevent_decref_and_unlock_(bev);
+}
+static void
+bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
+{
+ struct bufferevent *bev = ctx;
+ bufferevent_incref_and_lock_(bev);
+ bufferevent_disable(bev, EV_WRITE);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
+ bufferevent_decref_and_unlock_(bev);
+}
+
+void
+bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
+{
+ event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
+ bufferevent_generic_read_timeout_cb, bev);
+ event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE,
+ bufferevent_generic_write_timeout_cb, bev);
+}
+
+int
+bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
+{
+ const short enabled = bev->enabled;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r1=0, r2=0;
+ if ((enabled & EV_READ) && !bev_p->read_suspended &&
+ evutil_timerisset(&bev->timeout_read))
+ r1 = event_add(&bev->ev_read, &bev->timeout_read);
+ else
+ r1 = event_del(&bev->ev_read);
+
+ if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
+ evutil_timerisset(&bev->timeout_write) &&
+ evbuffer_get_length(bev->output))
+ r2 = event_add(&bev->ev_write, &bev->timeout_write);
+ else
+ r2 = event_del(&bev->ev_write);
+ if (r1 < 0 || r2 < 0)
+ return -1;
+ return 0;
+}
+
+int
+bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev)
+{
+ int r = 0;
+ if (event_pending(&bev->ev_read, EV_READ, NULL)) {
+ if (evutil_timerisset(&bev->timeout_read)) {
+ if (bufferevent_add_event_(&bev->ev_read, &bev->timeout_read) < 0)
+ r = -1;
+ } else {
+ event_remove_timer(&bev->ev_read);
+ }
+ }
+ if (event_pending(&bev->ev_write, EV_WRITE, NULL)) {
+ if (evutil_timerisset(&bev->timeout_write)) {
+ if (bufferevent_add_event_(&bev->ev_write, &bev->timeout_write) < 0)
+ r = -1;
+ } else {
+ event_remove_timer(&bev->ev_write);
+ }
+ }
+ return r;
+}
+
+int
+bufferevent_add_event_(struct event *ev, const struct timeval *tv)
+{
+ if (!evutil_timerisset(tv))
+ return event_add(ev, NULL);
+ else
+ return event_add(ev, tv);
+}
+
+/* For use by user programs only; internally, we should be calling
+ either bufferevent_incref_and_lock_(), or BEV_LOCK. */
+void
+bufferevent_lock(struct bufferevent *bev)
+{
+ bufferevent_incref_and_lock_(bev);
+}
+
+void
+bufferevent_unlock(struct bufferevent *bev)
+{
+ bufferevent_decref_and_unlock_(bev);
+}
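+
+/*
+ * Illustrative sketch (editor's addition): these are meant for applications
+ * that created the bufferevent with BEV_OPT_THREADSAFE and need a compound
+ * operation to be atomic; 'limit', 'data' and 'len' are placeholders:
+ *
+ *   bufferevent_lock(bev);
+ *   if (evbuffer_get_length(bufferevent_get_output(bev)) < limit)
+ *       bufferevent_write(bev, data, len);
+ *   bufferevent_unlock(bev);
+ */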
diff --git a/libs/libevent/src/bufferevent_async.c b/libs/libevent/src/bufferevent_async.c
new file mode 100644
index 0000000000..6395e57a9f
--- /dev/null
+++ b/libs/libevent/src/bufferevent_async.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#include <sys/queue.h>
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "event2/util.h"
+#include "event-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+#include "iocp-internal.h"
+
+#ifndef SO_UPDATE_CONNECT_CONTEXT
+/* MinGW is sometimes missing this */
+#define SO_UPDATE_CONNECT_CONTEXT 0x7010
+#endif
+
+/* prototypes */
+static int be_async_enable(struct bufferevent *, short);
+static int be_async_disable(struct bufferevent *, short);
+static void be_async_destruct(struct bufferevent *);
+static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
+static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+struct bufferevent_async {
+ struct bufferevent_private bev;
+ struct event_overlapped connect_overlapped;
+ struct event_overlapped read_overlapped;
+ struct event_overlapped write_overlapped;
+ size_t read_in_progress;
+ size_t write_in_progress;
+ unsigned ok : 1;
+ unsigned read_added : 1;
+ unsigned write_added : 1;
+};
+
+const struct bufferevent_ops bufferevent_ops_async = {
+ "socket_async",
+ evutil_offsetof(struct bufferevent_async, bev.bev),
+ be_async_enable,
+ be_async_disable,
+ NULL, /* Unlink */
+ be_async_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_async_flush,
+ be_async_ctrl,
+};
+
+static inline struct bufferevent_async *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_a;
+ if (bev->be_ops != &bufferevent_ops_async)
+ return NULL;
+ bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_connect(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_read(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_write(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static void
+bev_async_del_write(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (beva->write_added) {
+ beva->write_added = 0;
+ event_base_del_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_del_read(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (beva->read_added) {
+ beva->read_added = 0;
+ event_base_del_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_add_write(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (!beva->write_added) {
+ beva->write_added = 1;
+ event_base_add_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_add_read(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (!beva->read_added) {
+ beva->read_added = 1;
+ event_base_add_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_consider_writing(struct bufferevent_async *beva)
+{
+ size_t at_most;
+ int limit;
+ struct bufferevent *bev = &beva->bev.bev;
+
+	/* Don't write if there's a write in progress, if we do not
+	 * want to write, or if there's nothing left to write. */
+ if (beva->write_in_progress || beva->bev.connecting)
+ return;
+ if (!beva->ok || !(bev->enabled&EV_WRITE) ||
+ !evbuffer_get_length(bev->output)) {
+ bev_async_del_write(beva);
+ return;
+ }
+
+ at_most = evbuffer_get_length(bev->output);
+
+ /* This is safe so long as bufferevent_get_write_max never returns
+ * more than INT_MAX. That's true for now. XXXX */
+ limit = (int)bufferevent_get_write_max_(&beva->bev);
+ if (at_most >= (size_t)limit && limit >= 0)
+ at_most = limit;
+
+ if (beva->bev.write_suspended) {
+ bev_async_del_write(beva);
+ return;
+ }
+
+ /* XXXX doesn't respect low-water mark very well. */
+ bufferevent_incref_(bev);
+ if (evbuffer_launch_write_(bev->output, at_most,
+ &beva->write_overlapped)) {
+ bufferevent_decref_(bev);
+ beva->ok = 0;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ } else {
+ beva->write_in_progress = at_most;
+ bufferevent_decrement_write_buckets_(&beva->bev, at_most);
+ bev_async_add_write(beva);
+ }
+}
+
+static void
+bev_async_consider_reading(struct bufferevent_async *beva)
+{
+ size_t cur_size;
+ size_t read_high;
+ size_t at_most;
+ int limit;
+ struct bufferevent *bev = &beva->bev.bev;
+
+ /* Don't read if there is a read in progress, or we do not
+ * want to read. */
+ if (beva->read_in_progress || beva->bev.connecting)
+ return;
+ if (!beva->ok || !(bev->enabled&EV_READ)) {
+ bev_async_del_read(beva);
+ return;
+ }
+
+ /* Don't read if we're full */
+ cur_size = evbuffer_get_length(bev->input);
+ read_high = bev->wm_read.high;
+ if (read_high) {
+ if (cur_size >= read_high) {
+ bev_async_del_read(beva);
+ return;
+ }
+ at_most = read_high - cur_size;
+ } else {
+ at_most = 16384; /* FIXME totally magic. */
+ }
+
+ /* XXXX This over-commits. */
+	/* XXXX see also the note above on the cast of bufferevent_get_write_max_() */
+ limit = (int)bufferevent_get_read_max_(&beva->bev);
+ if (at_most >= (size_t)limit && limit >= 0)
+ at_most = limit;
+
+ if (beva->bev.read_suspended) {
+ bev_async_del_read(beva);
+ return;
+ }
+
+ bufferevent_incref_(bev);
+ if (evbuffer_launch_read_(bev->input, at_most, &beva->read_overlapped)) {
+ beva->ok = 0;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ bufferevent_decref_(bev);
+ } else {
+ beva->read_in_progress = at_most;
+ bufferevent_decrement_read_buckets_(&beva->bev, at_most);
+ bev_async_add_read(beva);
+ }
+
+ return;
+}
+
+static void
+be_async_outbuf_callback(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_async *bev_async = upcast(bev);
+
+ /* If we added data to the outbuf and were not writing before,
+ * we may want to write now. */
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (cbinfo->n_added)
+ bev_async_consider_writing(bev_async);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+be_async_inbuf_callback(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_async *bev_async = upcast(bev);
+
+ /* If we drained data from the inbuf and were not reading before,
+ * we may want to read now */
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (cbinfo->n_deleted)
+ bev_async_consider_reading(bev_async);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static int
+be_async_enable(struct bufferevent *buf, short what)
+{
+ struct bufferevent_async *bev_async = upcast(buf);
+
+ if (!bev_async->ok)
+ return -1;
+
+ if (bev_async->bev.connecting) {
+ /* Don't launch anything during connection attempts. */
+ return 0;
+ }
+
+ if (what & EV_READ)
+ BEV_RESET_GENERIC_READ_TIMEOUT(buf);
+ if (what & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
+
+ /* If we newly enable reading or writing, and we aren't reading or
+ writing already, consider launching a new read or write. */
+
+ if (what & EV_READ)
+ bev_async_consider_reading(bev_async);
+ if (what & EV_WRITE)
+ bev_async_consider_writing(bev_async);
+ return 0;
+}
+
+static int
+be_async_disable(struct bufferevent *bev, short what)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ /* XXXX If we disable reading or writing, we may want to consider
+ * canceling any in-progress read or write operation, though it might
+ * not work. */
+
+ if (what & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ bev_async_del_read(bev_async);
+ }
+ if (what & EV_WRITE) {
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ bev_async_del_write(bev_async);
+ }
+
+ return 0;
+}
+
+static void
+be_async_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ struct bufferevent_private *bev_p = BEV_UPCAST(bev);
+ evutil_socket_t fd;
+
+ EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
+ !upcast(bev)->read_in_progress);
+
+ bev_async_del_read(bev_async);
+ bev_async_del_write(bev_async);
+
+ fd = evbuffer_overlapped_get_fd_(bev->input);
+ if (fd != (evutil_socket_t)INVALID_SOCKET &&
+ (bev_p->options & BEV_OPT_CLOSE_ON_FREE)) {
+ evutil_closesocket(fd);
+ evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
+ }
+}
+
+/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
+ * we use WSAGetOverlappedResult to translate. */
+static void
+bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
+{
+ DWORD bytes, flags;
+ evutil_socket_t fd;
+
+ fd = evbuffer_overlapped_get_fd_(bev->input);
+ WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
+}
+
+static int
+be_async_flush(struct bufferevent *bev, short what,
+ enum bufferevent_flush_mode mode)
+{
+ return 0;
+}
+
+static void
+connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_connect(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ evutil_socket_t sock;
+
+ BEV_LOCK(bev);
+
+ EVUTIL_ASSERT(bev_a->bev.connecting);
+ bev_a->bev.connecting = 0;
+ sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
+ /* XXXX Handle error? */
+ setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
+
+ if (ok)
+ bufferevent_async_set_connected_(bev);
+ else
+ bev_async_set_wsa_error(bev, eo);
+
+ bufferevent_run_eventcb_(bev,
+ ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR, 0);
+
+ event_base_del_virtual_(bev->ev_base);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+read_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_read(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ short what = BEV_EVENT_READING;
+ ev_ssize_t amount_unread;
+ BEV_LOCK(bev);
+ EVUTIL_ASSERT(bev_a->read_in_progress);
+
+ amount_unread = bev_a->read_in_progress - nbytes;
+ evbuffer_commit_read_(bev->input, nbytes);
+ bev_a->read_in_progress = 0;
+ if (amount_unread)
+ bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);
+
+ if (!ok)
+ bev_async_set_wsa_error(bev, eo);
+
+ if (bev_a->ok) {
+ if (ok && nbytes) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ bev_async_consider_reading(bev_a);
+ } else if (!ok) {
+ what |= BEV_EVENT_ERROR;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ } else if (!nbytes) {
+ what |= BEV_EVENT_EOF;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+ }
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+write_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_write(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ short what = BEV_EVENT_WRITING;
+ ev_ssize_t amount_unwritten;
+
+ BEV_LOCK(bev);
+ EVUTIL_ASSERT(bev_a->write_in_progress);
+
+ amount_unwritten = bev_a->write_in_progress - nbytes;
+ evbuffer_commit_write_(bev->output, nbytes);
+ bev_a->write_in_progress = 0;
+
+ if (amount_unwritten)
+ bufferevent_decrement_write_buckets_(&bev_a->bev,
+ -amount_unwritten);
+
+
+ if (!ok)
+ bev_async_set_wsa_error(bev, eo);
+
+ if (bev_a->ok) {
+ if (ok && nbytes) {
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+ bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
+ bev_async_consider_writing(bev_a);
+ } else if (!ok) {
+ what |= BEV_EVENT_ERROR;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ } else if (!nbytes) {
+ what |= BEV_EVENT_EOF;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+ }
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+struct bufferevent *
+bufferevent_async_new_(struct event_base *base,
+ evutil_socket_t fd, int options)
+{
+ struct bufferevent_async *bev_a;
+ struct bufferevent *bev;
+ struct event_iocp_port *iocp;
+
+ options |= BEV_OPT_THREADSAFE;
+
+ if (!(iocp = event_base_get_iocp_(base)))
+ return NULL;
+
+ if (fd >= 0 && event_iocp_port_associate_(iocp, fd, 1)<0) {
+ int err = GetLastError();
+		/* We may have already associated this fd with a port.
+		 * Let's hope it's this port, and that the error code
+		 * for doing this never changes. */
+ if (err != ERROR_INVALID_PARAMETER)
+ return NULL;
+ }
+
+ if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
+ return NULL;
+
+ bev = &bev_a->bev.bev;
+ if (!(bev->input = evbuffer_overlapped_new_(fd))) {
+ mm_free(bev_a);
+ return NULL;
+ }
+ if (!(bev->output = evbuffer_overlapped_new_(fd))) {
+ evbuffer_free(bev->input);
+ mm_free(bev_a);
+ return NULL;
+ }
+
+ if (bufferevent_init_common_(&bev_a->bev, base, &bufferevent_ops_async,
+ options)<0)
+ goto err;
+
+ evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
+ evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);
+
+ event_overlapped_init_(&bev_a->connect_overlapped, connect_complete);
+ event_overlapped_init_(&bev_a->read_overlapped, read_complete);
+ event_overlapped_init_(&bev_a->write_overlapped, write_complete);
+
+ bufferevent_init_generic_timeout_cbs_(bev);
+
+ bev_a->ok = fd >= 0;
+
+ return bev;
+err:
+ bufferevent_free(&bev_a->bev.bev);
+ return NULL;
+}
+
+void
+bufferevent_async_set_connected_(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ bev_async->ok = 1;
+ bufferevent_init_generic_timeout_cbs_(bev);
+ /* Now's a good time to consider reading/writing */
+ be_async_enable(bev, bev->enabled);
+}
+
+int
+bufferevent_async_can_connect_(struct bufferevent *bev)
+{
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+
+ if (BEV_IS_ASYNC(bev) &&
+ event_base_get_iocp_(bev->ev_base) &&
+ ext && ext->ConnectEx)
+ return 1;
+
+ return 0;
+}
+
+int
+bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
+ const struct sockaddr *sa, int socklen)
+{
+ BOOL rc;
+ struct bufferevent_async *bev_async = upcast(bev);
+ struct sockaddr_storage ss;
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+
+ EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);
+
+ /* ConnectEx() requires that the socket be bound to an address
+ * with bind() before using, otherwise it will fail. We attempt
+ * to issue a bind() here, taking into account that the error
+ * code is set to WSAEINVAL when the socket is already bound. */
+ memset(&ss, 0, sizeof(ss));
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = INADDR_ANY;
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr = in6addr_any;
+ } else {
+ /* Well, the user will have to bind() */
+ return -1;
+ }
+ if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
+ WSAGetLastError() != WSAEINVAL)
+ return -1;
+
+ event_base_add_virtual_(bev->ev_base);
+ bufferevent_incref_(bev);
+ rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
+ &bev_async->connect_overlapped.overlapped);
+ if (rc || WSAGetLastError() == ERROR_IO_PENDING)
+ return 0;
+
+ event_base_del_virtual_(bev->ev_base);
+ bufferevent_decref_(bev);
+
+ return -1;
+}
+
+static int
+be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ switch (op) {
+ case BEV_CTRL_GET_FD:
+ data->fd = evbuffer_overlapped_get_fd_(bev->input);
+ return 0;
+ case BEV_CTRL_SET_FD: {
+ struct event_iocp_port *iocp;
+
+ if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
+ return 0;
+ if (!(iocp = event_base_get_iocp_(bev->ev_base)))
+ return -1;
+ if (event_iocp_port_associate_(iocp, data->fd, 1) < 0)
+ return -1;
+ evbuffer_overlapped_set_fd_(bev->input, data->fd);
+ evbuffer_overlapped_set_fd_(bev->output, data->fd);
+ return 0;
+ }
+ case BEV_CTRL_CANCEL_ALL: {
+ struct bufferevent_async *bev_a = upcast(bev);
+ evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
+ if (fd != (evutil_socket_t)INVALID_SOCKET &&
+ (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
+ closesocket(fd);
+ evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
+ }
+ bev_a->ok = 0;
+ return 0;
+ }
+ case BEV_CTRL_GET_UNDERLYING:
+ default:
+ return -1;
+ }
+}
+
+
diff --git a/libs/libevent/src/bufferevent_filter.c b/libs/libevent/src/bufferevent_filter.c
new file mode 100644
index 0000000000..6c3ffc4f2d
--- /dev/null
+++ b/libs/libevent/src/bufferevent_filter.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#include "event2/event-config.h"
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+
+/* prototypes */
+static int be_filter_enable(struct bufferevent *, short);
+static int be_filter_disable(struct bufferevent *, short);
+static void be_filter_unlink(struct bufferevent *);
+static void be_filter_destruct(struct bufferevent *);
+
+static void be_filter_readcb(struct bufferevent *, void *);
+static void be_filter_writecb(struct bufferevent *, void *);
+static void be_filter_eventcb(struct bufferevent *, short, void *);
+static int be_filter_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode);
+static int be_filter_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+static void bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *info, void *arg);
+
+struct bufferevent_filtered {
+ struct bufferevent_private bev;
+
+ /** The bufferevent that we read/write filtered data from/to. */
+ struct bufferevent *underlying;
+ /** A callback on our outbuf to notice when somebody adds data */
+ struct evbuffer_cb_entry *outbuf_cb;
+ /** True iff we have received an EOF callback from the underlying
+ * bufferevent. */
+ unsigned got_eof;
+
+ /** Function to free context when we're done. */
+ void (*free_context)(void *);
+ /** Input filter */
+ bufferevent_filter_cb process_in;
+ /** Output filter */
+ bufferevent_filter_cb process_out;
+ /** User-supplied argument to the filters. */
+ void *context;
+};
+
+const struct bufferevent_ops bufferevent_ops_filter = {
+ "filter",
+ evutil_offsetof(struct bufferevent_filtered, bev.bev),
+ be_filter_enable,
+ be_filter_disable,
+ be_filter_unlink,
+ be_filter_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_filter_flush,
+ be_filter_ctrl,
+};
+
+/* Given a bufferevent that's really the bev member of a bufferevent_filtered,
+ * return that bufferevent_filtered. Returns NULL otherwise. */
+static inline struct bufferevent_filtered *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bev_f;
+ if (bev->be_ops != &bufferevent_ops_filter)
+ return NULL;
+ bev_f = (void*)( ((char*)bev) -
+ evutil_offsetof(struct bufferevent_filtered, bev.bev));
+ EVUTIL_ASSERT(bev_f->bev.bev.be_ops == &bufferevent_ops_filter);
+ return bev_f;
+}
+
+#define downcast(bev_f) (&(bev_f)->bev.bev)
+
+/** Return 1 iff bevf's underlying bufferevent's output buffer is at or
+ * over its high watermark such that we should not write to it in a given
+ * flush mode. */
+static int
+be_underlying_writebuf_full(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state)
+{
+ struct bufferevent *u = bevf->underlying;
+ return state == BEV_NORMAL &&
+ u->wm_write.high &&
+ evbuffer_get_length(u->output) >= u->wm_write.high;
+}
+
+/** Return 1 if our input buffer is at or over its high watermark such that we
+ * should not write to it in a given flush mode. */
+static int
+be_readbuf_full(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state)
+{
+ struct bufferevent *bufev = downcast(bevf);
+ return state == BEV_NORMAL &&
+ bufev->wm_read.high &&
+ evbuffer_get_length(bufev->input) >= bufev->wm_read.high;
+}
+
+
+/* Filter to use when we're created with a NULL filter. */
+static enum bufferevent_filter_result
+be_null_filter(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t lim,
+ enum bufferevent_flush_mode state, void *ctx)
+{
+ (void)state;
+ if (evbuffer_remove_buffer(src, dst, lim) == 0)
+ return BEV_OK;
+ else
+ return BEV_ERROR;
+}
+
+struct bufferevent *
+bufferevent_filter_new(struct bufferevent *underlying,
+ bufferevent_filter_cb input_filter,
+ bufferevent_filter_cb output_filter,
+ int options,
+ void (*free_context)(void *),
+ void *ctx)
+{
+ struct bufferevent_filtered *bufev_f;
+ int tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ if (!underlying)
+ return NULL;
+
+ if (!input_filter)
+ input_filter = be_null_filter;
+ if (!output_filter)
+ output_filter = be_null_filter;
+
+ bufev_f = mm_calloc(1, sizeof(struct bufferevent_filtered));
+ if (!bufev_f)
+ return NULL;
+
+ if (bufferevent_init_common_(&bufev_f->bev, underlying->ev_base,
+ &bufferevent_ops_filter, tmp_options) < 0) {
+ mm_free(bufev_f);
+ return NULL;
+ }
+ if (options & BEV_OPT_THREADSAFE) {
+ bufferevent_enable_locking_(downcast(bufev_f), NULL);
+ }
+
+ bufev_f->underlying = underlying;
+
+ bufev_f->process_in = input_filter;
+ bufev_f->process_out = output_filter;
+ bufev_f->free_context = free_context;
+ bufev_f->context = ctx;
+
+ bufferevent_setcb(bufev_f->underlying,
+ be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);
+
+ bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
+ bufferevent_filtered_outbuf_cb, bufev_f);
+
+ bufferevent_init_generic_timeout_cbs_(downcast(bufev_f));
+ bufferevent_incref_(underlying);
+
+ bufferevent_enable(underlying, EV_READ|EV_WRITE);
+ bufferevent_suspend_read_(underlying, BEV_SUSPEND_FILT_READ);
+
+ return downcast(bufev_f);
+}
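+
+/* Illustrative usage sketch (kept disabled; not part of the original source).
+ * It only shows the bufferevent_filter_cb contract assumed by
+ * bufferevent_filter_new() above: move data from 'src' to 'dst', honor
+ * 'limit' when it is non-negative, and return BEV_OK on progress.  The
+ * uppercasing transform and the "example_" names are hypothetical. */
+#if 0
+static enum bufferevent_filter_result
+example_upper_filter(struct evbuffer *src, struct evbuffer *dst,
+    ev_ssize_t limit, enum bufferevent_flush_mode mode, void *ctx)
+{
+	char buf[1024];
+	size_t at_most = sizeof(buf);
+	int n, i;
+	(void)mode;
+	(void)ctx;
+	if (limit >= 0 && (size_t)limit < at_most)
+		at_most = (size_t)limit;
+	/* Process one chunk per call; the loops in be_filter_process_input
+	 * and be_filter_process_output call the filter again as needed. */
+	n = evbuffer_remove(src, buf, at_most);
+	if (n <= 0)
+		return BEV_NEED_MORE;
+	for (i = 0; i < n; ++i) {
+		if (buf[i] >= 'a' && buf[i] <= 'z')
+			buf[i] = buf[i] - ('a' - 'A');
+	}
+	evbuffer_add(dst, buf, n);
+	return BEV_OK;
+}
+
+static struct bufferevent *
+example_wrap(struct bufferevent *underlying)
+{
+	/* Uppercase incoming data; leave outgoing data untouched. */
+	return bufferevent_filter_new(underlying, example_upper_filter, NULL,
+	    BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
+}
+#endif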
+
+static void
+be_filter_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ EVUTIL_ASSERT(bevf);
+
+ if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ /* Yes, there is also a decref in bufferevent_decref_.
+ * That decref corresponds to the incref when we set
+ * underlying for the first time. This decref is an
+ * extra one to remove the last reference.
+ */
+ if (BEV_UPCAST(bevf->underlying)->refcnt < 2) {
+			event_warnx("BEV_OPT_CLOSE_ON_FREE set on a "
+			    "bufferevent with too few references");
+ } else {
+ bufferevent_free(bevf->underlying);
+ }
+ } else {
+ if (bevf->underlying) {
+ if (bevf->underlying->errorcb == be_filter_eventcb)
+ bufferevent_setcb(bevf->underlying,
+ NULL, NULL, NULL, NULL);
+ bufferevent_unsuspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ }
+}
+
+static void
+be_filter_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ EVUTIL_ASSERT(bevf);
+ if (bevf->free_context)
+ bevf->free_context(bevf->context);
+}
+
+static int
+be_filter_enable(struct bufferevent *bev, short event)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ if (event & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ if (event & EV_READ) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_unsuspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ return 0;
+}
+
+static int
+be_filter_disable(struct bufferevent *bev, short event)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ if (event & EV_WRITE)
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ if (event & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_suspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ return 0;
+}
+
+static enum bufferevent_filter_result
+be_filter_process_input(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state,
+ int *processed_out)
+{
+ enum bufferevent_filter_result res;
+ struct bufferevent *bev = downcast(bevf);
+
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the filter
+ * unless we're reading data and under our high-water mark.*/
+ if (!(bev->enabled & EV_READ) ||
+ be_readbuf_full(bevf, state))
+ return BEV_OK;
+ }
+
+ do {
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL && bev->wm_read.high)
+ limit = bev->wm_read.high -
+ evbuffer_get_length(bev->input);
+
+ res = bevf->process_in(bevf->underlying->input,
+ bev->input, limit, state, bevf->context);
+
+ if (res == BEV_OK)
+ *processed_out = 1;
+ } while (res == BEV_OK &&
+ (bev->enabled & EV_READ) &&
+ evbuffer_get_length(bevf->underlying->input) &&
+ !be_readbuf_full(bevf, state));
+
+ if (*processed_out)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+
+ return res;
+}
+
+
+static enum bufferevent_filter_result
+be_filter_process_output(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state,
+ int *processed_out)
+{
+ /* Requires references and lock: might call writecb */
+ enum bufferevent_filter_result res = BEV_OK;
+ struct bufferevent *bufev = downcast(bevf);
+ int again = 0;
+
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the
+ * filter unless we're writing data, and the underlying
+ * bufferevent is accepting data, and we have data to
+ * give the filter. If we're in 'flush' or 'finish',
+ * call the filter no matter what. */
+ if (!(bufev->enabled & EV_WRITE) ||
+ be_underlying_writebuf_full(bevf, state) ||
+ !evbuffer_get_length(bufev->output))
+ return BEV_OK;
+ }
+
+ /* disable the callback that calls this function
+ when the user adds to the output buffer. */
+ evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);
+
+ do {
+ int processed = 0;
+ again = 0;
+
+ do {
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL &&
+ bevf->underlying->wm_write.high)
+ limit = bevf->underlying->wm_write.high -
+ evbuffer_get_length(bevf->underlying->output);
+
+ res = bevf->process_out(downcast(bevf)->output,
+ bevf->underlying->output,
+ limit,
+ state,
+ bevf->context);
+
+ if (res == BEV_OK)
+ processed = *processed_out = 1;
+ } while (/* Stop if the filter wasn't successful...*/
+ res == BEV_OK &&
+ /* Or if we aren't writing any more. */
+ (bufev->enabled & EV_WRITE) &&
+			 /* Or if we have nothing more to write. */
+ evbuffer_get_length(bufev->output) &&
+ /* Or if we have filled the underlying output buffer. */
+ !be_underlying_writebuf_full(bevf,state));
+
+ if (processed) {
+ /* call the write callback.*/
+ bufferevent_trigger_nolock_(bufev, EV_WRITE, 0);
+
+ if (res == BEV_OK &&
+ (bufev->enabled & EV_WRITE) &&
+ evbuffer_get_length(bufev->output) &&
+ !be_underlying_writebuf_full(bevf, state)) {
+ again = 1;
+ }
+ }
+ } while (again);
+
+ /* reenable the outbuf_cb */
+ evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
+ EVBUFFER_CB_ENABLED);
+
+ if (*processed_out)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
+
+ return res;
+}
+
+/* Called when the size of our outbuf changes. */
+static void
+bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo, void *arg)
+{
+ struct bufferevent_filtered *bevf = arg;
+ struct bufferevent *bev = downcast(bevf);
+
+ if (cbinfo->n_added) {
+ int processed_any = 0;
+ /* Somebody added more data to the output buffer. Try to
+ * process it, if we should. */
+ bufferevent_incref_and_lock_(bev);
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ bufferevent_decref_and_unlock_(bev);
+ }
+}
+
+/* Called when the underlying bufferevent has read new data. */
+static void
+be_filter_readcb(struct bufferevent *underlying, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ enum bufferevent_filter_result res;
+ enum bufferevent_flush_mode state;
+ struct bufferevent *bufev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
+ int processed_any = 0;
+
+ BEV_LOCK(bufev);
+
+	// It's possible our refcount is 0 at this point if another thread freed our bufferevent_filtered
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+
+ if (bevf->got_eof)
+ state = BEV_FINISHED;
+ else
+ state = BEV_NORMAL;
+
+ /* XXXX use return value */
+ res = be_filter_process_input(bevf, state, &processed_any);
+ (void)res;
+
+ /* XXX This should be in process_input, not here. There are
+ * other places that can call process-input, and they should
+ * force readcb calls as needed. */
+ if (processed_any)
+ bufferevent_trigger_nolock_(bufev, EV_READ, 0);
+ }
+
+ BEV_UNLOCK(bufev);
+}
+
+/* Called when the underlying socket has drained enough that we can write to
+ it. */
+static void
+be_filter_writecb(struct bufferevent *underlying, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ struct bufferevent *bev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+ int processed_any = 0;
+
+ BEV_LOCK(bev);
+
+	// It's possible our refcount is 0 at this point if another thread freed our bufferevent_filtered
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ }
+
+ BEV_UNLOCK(bev);
+}
+
+/* Called when the underlying socket has given us an error */
+static void
+be_filter_eventcb(struct bufferevent *underlying, short what, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ struct bufferevent *bev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+
+ BEV_LOCK(bev);
+
+	// It's possible our refcount is 0 at this point if another thread freed our bufferevent_filtered
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+
+		/* All we can really do is tell our own eventcb. */
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+
+ BEV_UNLOCK(bev);
+}
+
+static int
+be_filter_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode)
+{
+ struct bufferevent_filtered *bevf = upcast(bufev);
+ int processed_any = 0;
+ EVUTIL_ASSERT(bevf);
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (iotype & EV_READ) {
+ be_filter_process_input(bevf, mode, &processed_any);
+ }
+ if (iotype & EV_WRITE) {
+ be_filter_process_output(bevf, mode, &processed_any);
+ }
+ /* XXX check the return value? */
+ /* XXX does this want to recursively call lower-level flushes? */
+ bufferevent_flush(bevf->underlying, iotype, mode);
+
+ bufferevent_decref_and_unlock_(bufev);
+
+ return processed_any;
+}
+
+static int
+be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ struct bufferevent_filtered *bevf;
+ switch (op) {
+ case BEV_CTRL_GET_UNDERLYING:
+ bevf = upcast(bev);
+ data->ptr = bevf->underlying;
+ return 0;
+ case BEV_CTRL_SET_FD:
+ bevf = upcast(bev);
+
+ if (bevf->underlying &&
+ bevf->underlying->be_ops &&
+ bevf->underlying->be_ops->ctrl) {
+ return (bevf->underlying->be_ops->ctrl)(bevf->underlying, op, data);
+ }
+
+ case BEV_CTRL_GET_FD:
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+
+ return -1;
+}
diff --git a/libs/libevent/src/bufferevent_openssl.c b/libs/libevent/src/bufferevent_openssl.c
new file mode 100644
index 0000000000..37478b6a83
--- /dev/null
+++ b/libs/libevent/src/bufferevent_openssl.c
@@ -0,0 +1,1484 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_ssl.h"
+#include "event2/buffer.h"
+#include "event2/event.h"
+
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "log-internal.h"
+
+#include <openssl/bio.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+
+/*
+ * Define an OpenSSL bio that targets a bufferevent.
+ */
+
+/* --------------------
+ A BIO is an OpenSSL abstraction that handles reading and writing data. The
+ library will happily speak SSL over anything that implements a BIO
+ interface.
+
+ Here we define a BIO implementation that directs its output to a
+ bufferevent. We'll want to use this only when none of OpenSSL's built-in
+ IO mechanisms work for us.
+ -------------------- */
+
+/* every BIO type needs its own integer type value. */
+#define BIO_TYPE_LIBEVENT 57
+/* ???? Arguably, we should set BIO_TYPE_FILTER or BIO_TYPE_SOURCE_SINK on
+ * this. */
+
+#if 0
+static void
+print_err(int val)
+{
+ int err;
+ printf("Error was %d\n", val);
+
+ while ((err = ERR_get_error())) {
+ const char *msg = (const char*)ERR_reason_error_string(err);
+ const char *lib = (const char*)ERR_lib_error_string(err);
+ const char *func = (const char*)ERR_func_error_string(err);
+
+ printf("%s in %s %s\n", msg, lib, func);
+ }
+}
+#else
+#define print_err(v) ((void)0)
+#endif
+
+/* Called to initialize a new BIO */
+static int
+bio_bufferevent_new(BIO *b)
+{
+ b->init = 0;
+ b->num = -1;
+ b->ptr = NULL; /* We'll be putting the bufferevent in this field.*/
+ b->flags = 0;
+ return 1;
+}
+
+/* Called to uninitialize the BIO. */
+static int
+bio_bufferevent_free(BIO *b)
+{
+ if (!b)
+ return 0;
+ if (b->shutdown) {
+ if (b->init && b->ptr)
+ bufferevent_free(b->ptr);
+ b->init = 0;
+ b->flags = 0;
+ b->ptr = NULL;
+ }
+ return 1;
+}
+
+/* Called to extract data from the BIO. */
+static int
+bio_bufferevent_read(BIO *b, char *out, int outlen)
+{
+ int r = 0;
+ struct evbuffer *input;
+
+ BIO_clear_retry_flags(b);
+
+ if (!out)
+ return 0;
+ if (!b->ptr)
+ return -1;
+
+ input = bufferevent_get_input(b->ptr);
+ if (evbuffer_get_length(input) == 0) {
+ /* If there's no data to read, say so. */
+ BIO_set_retry_read(b);
+ return -1;
+ } else {
+ r = evbuffer_remove(input, out, outlen);
+ }
+
+ return r;
+}
+
+/* Called to write data into the BIO */
+static int
+bio_bufferevent_write(BIO *b, const char *in, int inlen)
+{
+ struct bufferevent *bufev = b->ptr;
+ struct evbuffer *output;
+ size_t outlen;
+
+ BIO_clear_retry_flags(b);
+
+ if (!b->ptr)
+ return -1;
+
+ output = bufferevent_get_output(bufev);
+ outlen = evbuffer_get_length(output);
+
+ /* Copy only as much data onto the output buffer as can fit under the
+ * high-water mark. */
+ if (bufev->wm_write.high && bufev->wm_write.high <= (outlen+inlen)) {
+ if (bufev->wm_write.high <= outlen) {
+ /* If no data can fit, we'll need to retry later. */
+ BIO_set_retry_write(b);
+ return -1;
+ }
+ inlen = bufev->wm_write.high - outlen;
+ }
+
+ EVUTIL_ASSERT(inlen > 0);
+ evbuffer_add(output, in, inlen);
+ return inlen;
+}
+
+/* Called to handle various requests */
+static long
+bio_bufferevent_ctrl(BIO *b, int cmd, long num, void *ptr)
+{
+ struct bufferevent *bufev = b->ptr;
+ long ret = 1;
+
+ switch (cmd) {
+ case BIO_CTRL_GET_CLOSE:
+ ret = b->shutdown;
+ break;
+ case BIO_CTRL_SET_CLOSE:
+ b->shutdown = (int)num;
+ break;
+ case BIO_CTRL_PENDING:
+ ret = evbuffer_get_length(bufferevent_get_input(bufev)) != 0;
+ break;
+ case BIO_CTRL_WPENDING:
+ ret = evbuffer_get_length(bufferevent_get_output(bufev)) != 0;
+ break;
+ /* XXXX These two are given a special-case treatment because
+ * of cargo-cultism. I should come up with a better reason. */
+ case BIO_CTRL_DUP:
+ case BIO_CTRL_FLUSH:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/* Called to write a string to the BIO */
+static int
+bio_bufferevent_puts(BIO *b, const char *s)
+{
+ return bio_bufferevent_write(b, s, strlen(s));
+}
+
+/* Method table for the bufferevent BIO */
+static BIO_METHOD methods_bufferevent = {
+ BIO_TYPE_LIBEVENT, "bufferevent",
+ bio_bufferevent_write,
+ bio_bufferevent_read,
+ bio_bufferevent_puts,
+ NULL /* bio_bufferevent_gets */,
+ bio_bufferevent_ctrl,
+ bio_bufferevent_new,
+ bio_bufferevent_free,
+ NULL /* callback_ctrl */,
+};
+
+/* Return the method table for the bufferevents BIO */
+static BIO_METHOD *
+BIO_s_bufferevent(void)
+{
+ return &methods_bufferevent;
+}
+
+/* Create a new BIO to wrap communication around a bufferevent. If close_flag
+ * is true, the bufferevent will be freed when the BIO is closed. */
+static BIO *
+BIO_new_bufferevent(struct bufferevent *bufferevent, int close_flag)
+{
+ BIO *result;
+ if (!bufferevent)
+ return NULL;
+ if (!(result = BIO_new(BIO_s_bufferevent())))
+ return NULL;
+ result->init = 1;
+ result->ptr = bufferevent;
+ result->shutdown = close_flag ? 1 : 0;
+ return result;
+}
+
+/* --------------------
+ Now, here's the OpenSSL-based implementation of bufferevent.
+
+ The implementation comes in two flavors: one that connects its SSL object
+ to an underlying bufferevent using a BIO_bufferevent, and one that has the
+ SSL object connect to a socket directly. The latter should generally be
+ faster, except on Windows, where your best bet is using a
+ bufferevent_async.
+
+ (OpenSSL supports many other BIO types, too. But we can't use any unless
+ we have a good way to get notified when they become readable/writable.)
+ -------------------- */
+
+struct bio_data_counts {
+ unsigned long n_written;
+ unsigned long n_read;
+};
+
+struct bufferevent_openssl {
+ /* Shared fields with common bufferevent implementation code.
+ If we were set up with an underlying bufferevent, we use the
+	   events here as timers only.  If the SSL talks to a socket
+	   directly, then we use the events as socket events.
+ */
+ struct bufferevent_private bev;
+ /* An underlying bufferevent that we're directing our output to.
+ If it's NULL, then we're connected to an fd, not an evbuffer. */
+ struct bufferevent *underlying;
+ /* The SSL object doing our encryption. */
+ SSL *ssl;
+
+ /* A callback that's invoked when data arrives on our outbuf so we
+ know to write data to the SSL. */
+ struct evbuffer_cb_entry *outbuf_cb;
+
+ /* A count of how much data the bios have read/written total. Used
+ for rate-limiting. */
+ struct bio_data_counts counts;
+
+ /* If this value is greater than 0, then the last SSL_write blocked,
+ * and we need to try it again with this many bytes. */
+ ev_ssize_t last_write;
+
+#define NUM_ERRORS 3
+ ev_uint32_t errors[NUM_ERRORS];
+
+ /* When we next get available space, we should say "read" instead of
+ "write". This can happen if there's a renegotiation during a read
+ operation. */
+ unsigned read_blocked_on_write : 1;
+ /* When we next get data, we should say "write" instead of "read". */
+ unsigned write_blocked_on_read : 1;
+ /* Treat TCP close before SSL close on SSL >= v3 as clean EOF. */
+ unsigned allow_dirty_shutdown : 1;
+ /* XXX */
+ unsigned n_errors : 2;
+
+ /* Are we currently connecting, accepting, or doing IO? */
+ unsigned state : 2;
+};
+
+static int be_openssl_enable(struct bufferevent *, short);
+static int be_openssl_disable(struct bufferevent *, short);
+static void be_openssl_unlink(struct bufferevent *);
+static void be_openssl_destruct(struct bufferevent *);
+static int be_openssl_adj_timeouts(struct bufferevent *);
+static int be_openssl_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode);
+static int be_openssl_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+const struct bufferevent_ops bufferevent_ops_openssl = {
+ "ssl",
+ evutil_offsetof(struct bufferevent_openssl, bev.bev),
+ be_openssl_enable,
+ be_openssl_disable,
+ be_openssl_unlink,
+ be_openssl_destruct,
+ be_openssl_adj_timeouts,
+ be_openssl_flush,
+ be_openssl_ctrl,
+};
+
+/* Given a bufferevent, return a pointer to the bufferevent_openssl that
+ * contains it, if any. */
+static inline struct bufferevent_openssl *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_o;
+ if (bev->be_ops != &bufferevent_ops_openssl)
+ return NULL;
+ bev_o = (void*)( ((char*)bev) -
+ evutil_offsetof(struct bufferevent_openssl, bev.bev));
+ EVUTIL_ASSERT(bev_o->bev.bev.be_ops == &bufferevent_ops_openssl);
+ return bev_o;
+}
+
+static inline void
+put_error(struct bufferevent_openssl *bev_ssl, unsigned long err)
+{
+ if (bev_ssl->n_errors == NUM_ERRORS)
+ return;
+ /* The error type according to openssl is "unsigned long", but
+ openssl never uses more than 32 bits of it. It _can't_ use more
+ than 32 bits of it, since it needs to report errors on systems
+ where long is only 32 bits.
+ */
+ bev_ssl->errors[bev_ssl->n_errors++] = (ev_uint32_t) err;
+}
+
+/* Have the base communications channel (either the underlying bufferevent or
+ * ev_read and ev_write) start reading. Take the read-blocked-on-write flag
+ * into account. */
+static int
+start_reading(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_unsuspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ return 0;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r;
+ r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ if (r == 0 && bev_ssl->read_blocked_on_write)
+ r = bufferevent_add_event_(&bev->ev_write,
+ &bev->timeout_write);
+ return r;
+ }
+}
+
+/* Have the base communications channel (either the underlying bufferevent or
+ * ev_read and ev_write) start writing. Take the write-blocked-on-read flag
+ * into account. */
+static int
+start_writing(struct bufferevent_openssl *bev_ssl)
+{
+ int r = 0;
+ if (bev_ssl->underlying) {
+ ;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
+ if (!r && bev_ssl->write_blocked_on_read)
+ r = bufferevent_add_event_(&bev->ev_read,
+ &bev->timeout_read);
+ }
+ return r;
+}
+
+static void
+stop_reading(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->write_blocked_on_read)
+ return;
+ if (bev_ssl->underlying) {
+ bufferevent_suspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ event_del(&bev->ev_read);
+ }
+}
+
+static void
+stop_writing(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->read_blocked_on_write)
+ return;
+ if (bev_ssl->underlying) {
+ ;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ event_del(&bev->ev_write);
+ }
+}
+
+static int
+set_rbow(struct bufferevent_openssl *bev_ssl)
+{
+ if (!bev_ssl->underlying)
+ stop_reading(bev_ssl);
+ bev_ssl->read_blocked_on_write = 1;
+ return start_writing(bev_ssl);
+}
+
+static int
+set_wbor(struct bufferevent_openssl *bev_ssl)
+{
+ if (!bev_ssl->underlying)
+ stop_writing(bev_ssl);
+ bev_ssl->write_blocked_on_read = 1;
+ return start_reading(bev_ssl);
+}
+
+static int
+clear_rbow(struct bufferevent_openssl *bev_ssl)
+{
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r = 0;
+ bev_ssl->read_blocked_on_write = 0;
+ if (!(bev->enabled & EV_WRITE))
+ stop_writing(bev_ssl);
+ if (bev->enabled & EV_READ)
+ r = start_reading(bev_ssl);
+ return r;
+}
+
+
+static int
+clear_wbor(struct bufferevent_openssl *bev_ssl)
+{
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r = 0;
+ bev_ssl->write_blocked_on_read = 0;
+ if (!(bev->enabled & EV_READ))
+ stop_reading(bev_ssl);
+ if (bev->enabled & EV_WRITE)
+ r = start_writing(bev_ssl);
+ return r;
+}
+
+static void
+conn_closed(struct bufferevent_openssl *bev_ssl, int when, int errcode, int ret)
+{
+ int event = BEV_EVENT_ERROR;
+ int dirty_shutdown = 0;
+ unsigned long err;
+
+ switch (errcode) {
+ case SSL_ERROR_ZERO_RETURN:
+ /* Possibly a clean shutdown. */
+ if (SSL_get_shutdown(bev_ssl->ssl) & SSL_RECEIVED_SHUTDOWN)
+ event = BEV_EVENT_EOF;
+ else
+ dirty_shutdown = 1;
+ break;
+ case SSL_ERROR_SYSCALL:
+ /* IO error; possibly a dirty shutdown. */
+ if (ret == 0 && ERR_peek_error() == 0)
+ dirty_shutdown = 1;
+ break;
+ case SSL_ERROR_SSL:
+ /* Protocol error. */
+ break;
+ case SSL_ERROR_WANT_X509_LOOKUP:
+ /* XXXX handle this. */
+ break;
+ case SSL_ERROR_NONE:
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ case SSL_ERROR_WANT_CONNECT:
+ case SSL_ERROR_WANT_ACCEPT:
+ default:
+ /* should be impossible; treat as normal error. */
+ event_warnx("BUG: Unexpected OpenSSL error code %d", errcode);
+ break;
+ }
+
+ while ((err = ERR_get_error())) {
+ put_error(bev_ssl, err);
+ }
+
+ if (dirty_shutdown && bev_ssl->allow_dirty_shutdown)
+ event = BEV_EVENT_EOF;
+
+ stop_reading(bev_ssl);
+ stop_writing(bev_ssl);
+
+ /* when is BEV_EVENT_{READING|WRITING} */
+ event = when | event;
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0);
+}
+
+static void
+init_bio_counts(struct bufferevent_openssl *bev_ssl)
+{
+ bev_ssl->counts.n_written =
+ BIO_number_written(SSL_get_wbio(bev_ssl->ssl));
+ bev_ssl->counts.n_read =
+ BIO_number_read(SSL_get_rbio(bev_ssl->ssl));
+}
+
+static inline void
+decrement_buckets(struct bufferevent_openssl *bev_ssl)
+{
+ unsigned long num_w = BIO_number_written(SSL_get_wbio(bev_ssl->ssl));
+ unsigned long num_r = BIO_number_read(SSL_get_rbio(bev_ssl->ssl));
+ /* These next two subtractions can wrap around. That's okay. */
+ unsigned long w = num_w - bev_ssl->counts.n_written;
+ unsigned long r = num_r - bev_ssl->counts.n_read;
+ if (w)
+ bufferevent_decrement_write_buckets_(&bev_ssl->bev, w);
+ if (r)
+ bufferevent_decrement_read_buckets_(&bev_ssl->bev, r);
+ bev_ssl->counts.n_written = num_w;
+ bev_ssl->counts.n_read = num_r;
+}
+
+#define OP_MADE_PROGRESS 1
+#define OP_BLOCKED 2
+#define OP_ERR 4
+
+/* Return a bitmask of OP_MADE_PROGRESS (if we read anything); OP_BLOCKED (if
+ we're now blocked); and OP_ERR (if an error occurred). */
+static int
+do_read(struct bufferevent_openssl *bev_ssl, int n_to_read) {
+ /* Requires lock */
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ struct evbuffer *input = bev->input;
+ int r, n, i, n_used = 0, atmost;
+ struct evbuffer_iovec space[2];
+ int result = 0;
+
+ if (bev_ssl->bev.read_suspended)
+ return 0;
+
+ atmost = bufferevent_get_read_max_(&bev_ssl->bev);
+ if (n_to_read > atmost)
+ n_to_read = atmost;
+
+ n = evbuffer_reserve_space(input, n_to_read, space, 2);
+ if (n < 0)
+ return OP_ERR;
+
+ for (i=0; i<n; ++i) {
+ if (bev_ssl->bev.read_suspended)
+ break;
+ r = SSL_read(bev_ssl->ssl, space[i].iov_base, space[i].iov_len);
+ if (r>0) {
+ result |= OP_MADE_PROGRESS;
+ if (bev_ssl->read_blocked_on_write)
+ if (clear_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ ++n_used;
+ space[i].iov_len = r;
+ decrement_buckets(bev_ssl);
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_READ:
+ /* Can't read until underlying has more data. */
+ if (bev_ssl->read_blocked_on_write)
+ if (clear_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ break;
+ case SSL_ERROR_WANT_WRITE:
+ /* This read operation requires a write, and the
+ * underlying is full */
+ if (!bev_ssl->read_blocked_on_write)
+ if (set_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ break;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_READING, err, r);
+ break;
+ }
+ result |= OP_BLOCKED;
+ break; /* out of the loop */
+ }
+ }
+
+ if (n_used) {
+ evbuffer_commit_space(input, space, n_used);
+ if (bev_ssl->underlying)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ }
+
+ return result;
+}
+
+/* Return a bitmask of OP_MADE_PROGRESS (if we wrote anything); OP_BLOCKED (if
+ we're now blocked); and OP_ERR (if an error occurred). */
+static int
+do_write(struct bufferevent_openssl *bev_ssl, int atmost)
+{
+ int i, r, n, n_written = 0;
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ struct evbuffer *output = bev->output;
+ struct evbuffer_iovec space[8];
+ int result = 0;
+
+ if (bev_ssl->last_write > 0)
+ atmost = bev_ssl->last_write;
+ else
+ atmost = bufferevent_get_write_max_(&bev_ssl->bev);
+
+ n = evbuffer_peek(output, atmost, NULL, space, 8);
+ if (n < 0)
+ return OP_ERR | result;
+
+ if (n > 8)
+ n = 8;
+ for (i=0; i < n; ++i) {
+ if (bev_ssl->bev.write_suspended)
+ break;
+
+ /* SSL_write will (reasonably) return 0 if we tell it to
+ send 0 data. Skip this case so we don't interpret the
+ result as an error */
+ if (space[i].iov_len == 0)
+ continue;
+
+ r = SSL_write(bev_ssl->ssl, space[i].iov_base,
+ space[i].iov_len);
+ if (r > 0) {
+ result |= OP_MADE_PROGRESS;
+ if (bev_ssl->write_blocked_on_read)
+ if (clear_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ n_written += r;
+ bev_ssl->last_write = -1;
+ decrement_buckets(bev_ssl);
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_WRITE:
+				/* Can't write until the underlying can
+				 * accept more data. */
+ if (bev_ssl->write_blocked_on_read)
+ if (clear_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ bev_ssl->last_write = space[i].iov_len;
+ break;
+ case SSL_ERROR_WANT_READ:
+				/* This write operation requires a read, and
+				 * the underlying has no data to read yet. */
+ if (!bev_ssl->write_blocked_on_read)
+ if (set_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ bev_ssl->last_write = space[i].iov_len;
+ break;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_WRITING, err, r);
+ bev_ssl->last_write = -1;
+ break;
+ }
+ result |= OP_BLOCKED;
+ break;
+ }
+ }
+ if (n_written) {
+ evbuffer_drain(output, n_written);
+ if (bev_ssl->underlying)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
+ }
+ return result;
+}
+
+#define WRITE_FRAME 15000
+
+#define READ_DEFAULT 4096
+
+/* Try to figure out how many bytes to read; return 0 if we shouldn't be
+ * reading. */
+static int
+bytes_to_read(struct bufferevent_openssl *bev)
+{
+ struct evbuffer *input = bev->bev.bev.input;
+ struct event_watermark *wm = &bev->bev.bev.wm_read;
+ int result = READ_DEFAULT;
+ ev_ssize_t limit;
+ /* XXX 99% of this is generic code that nearly all bufferevents will
+ * want. */
+
+ if (bev->write_blocked_on_read) {
+ return 0;
+ }
+
+ if (! (bev->bev.bev.enabled & EV_READ)) {
+ return 0;
+ }
+
+ if (bev->bev.read_suspended) {
+ return 0;
+ }
+
+ if (wm->high) {
+ if (evbuffer_get_length(input) >= wm->high) {
+ return 0;
+ }
+
+ result = wm->high - evbuffer_get_length(input);
+ } else {
+ result = READ_DEFAULT;
+ }
+
+ /* Respect the rate limit */
+ limit = bufferevent_get_read_max_(&bev->bev);
+ if (result > limit) {
+ result = limit;
+ }
+
+ return result;
+}
+
+
+/* Things look readable. If write is blocked on read, write till it isn't.
+ * Read from the underlying buffer until we block or we hit our high-water
+ * mark.
+ */
+static void
+consider_reading(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+ int n_to_read;
+ int all_result_flags = 0;
+
+ while (bev_ssl->write_blocked_on_read) {
+ r = do_write(bev_ssl, WRITE_FRAME);
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+ }
+ if (bev_ssl->write_blocked_on_read)
+ return;
+
+ n_to_read = bytes_to_read(bev_ssl);
+
+ while (n_to_read) {
+ r = do_read(bev_ssl, n_to_read);
+ all_result_flags |= r;
+
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+
+ if (bev_ssl->bev.read_suspended)
+ break;
+
+ /* Read all pending data. This won't hit the network
+ * again, and will (most importantly) put us in a state
+ * where we don't need to read anything else until the
+ * socket is readable again. It'll potentially make us
+ * overrun our read high-watermark (somewhat
+ * regrettable). The damage to the rate-limit has
+ * already been done, since OpenSSL went and read a
+ * whole SSL record anyway. */
+ n_to_read = SSL_pending(bev_ssl->ssl);
+
+ /* XXX This if statement is actually a bad bug, added to avoid
+ * XXX a worse bug.
+ *
+ * The bad bug: It can potentially cause resource unfairness
+ * by reading too much data from the underlying bufferevent;
+ * it can potentially cause read looping if the underlying
+ * bufferevent is a bufferevent_pair and deferred callbacks
+ * aren't used.
+ *
+ * The worse bug: If we didn't do this, then we would
+ * potentially not read any more from bev_ssl->underlying
+ * until more data arrived there, which could lead to us
+ * waiting forever.
+ */
+ if (!n_to_read && bev_ssl->underlying)
+ n_to_read = bytes_to_read(bev_ssl);
+ }
+
+ if (all_result_flags & OP_MADE_PROGRESS) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ }
+
+ if (!bev_ssl->underlying) {
+ /* Should be redundant, but let's avoid busy-looping */
+ if (bev_ssl->bev.read_suspended ||
+ !(bev_ssl->bev.bev.enabled & EV_READ)) {
+ event_del(&bev_ssl->bev.bev.ev_read);
+ }
+ }
+}
+
+static void
+consider_writing(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+ struct evbuffer *output = bev_ssl->bev.bev.output;
+ struct evbuffer *target = NULL;
+ struct event_watermark *wm = NULL;
+
+ while (bev_ssl->read_blocked_on_write) {
+ r = do_read(bev_ssl, 1024); /* XXXX 1024 is a hack */
+ if (r & OP_MADE_PROGRESS) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ }
+ if (r & (OP_ERR|OP_BLOCKED))
+ break;
+ }
+ if (bev_ssl->read_blocked_on_write)
+ return;
+ if (bev_ssl->underlying) {
+ target = bev_ssl->underlying->output;
+ wm = &bev_ssl->underlying->wm_write;
+ }
+ while ((bev_ssl->bev.bev.enabled & EV_WRITE) &&
+ (! bev_ssl->bev.write_suspended) &&
+ evbuffer_get_length(output) &&
+ (!target || (! wm->high || evbuffer_get_length(target) < wm->high))) {
+ int n_to_write;
+ if (wm && wm->high)
+ n_to_write = wm->high - evbuffer_get_length(target);
+ else
+ n_to_write = WRITE_FRAME;
+ r = do_write(bev_ssl, n_to_write);
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+ }
+
+ if (!bev_ssl->underlying) {
+ if (evbuffer_get_length(output) == 0) {
+ event_del(&bev_ssl->bev.bev.ev_write);
+ } else if (bev_ssl->bev.write_suspended ||
+ !(bev_ssl->bev.bev.enabled & EV_WRITE)) {
+ /* Should be redundant, but let's avoid busy-looping */
+ event_del(&bev_ssl->bev.bev.ev_write);
+ }
+ }
+}
+
+static void
+be_openssl_readcb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ consider_reading(bev_ssl);
+}
+
+static void
+be_openssl_writecb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ consider_writing(bev_ssl);
+}
+
+static void
+be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ int event = 0;
+
+ if (what & BEV_EVENT_EOF) {
+ if (bev_ssl->allow_dirty_shutdown)
+ event = BEV_EVENT_EOF;
+ else
+ event = BEV_EVENT_ERROR;
+ } else if (what & BEV_EVENT_TIMEOUT) {
+ /* We sure didn't set this. Propagate it to the user. */
+ event = what;
+ } else if (what & BEV_EVENT_ERROR) {
+ /* An error occurred on the connection. Propagate it to the user. */
+ event = what;
+ } else if (what & BEV_EVENT_CONNECTED) {
+		/* Ignore it. We're already doing SSL_connect(), which will
+		   eat it. */
+ }
+ if (event)
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0);
+}
+
+static void
+be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what == EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
+ } else {
+ consider_reading(bev_ssl);
+ }
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static void
+be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what == EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
+ } else {
+ consider_writing(bev_ssl);
+ }
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static int
+be_openssl_auto_fd(struct bufferevent_openssl *bev_ssl, int fd)
+{
+ if (!bev_ssl->underlying) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ if (event_initialized(&bev->ev_read) && fd < 0) {
+ fd = event_get_fd(&bev->ev_read);
+ }
+ }
+ return fd;
+}
+
+static int
+set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_setcb(bev_ssl->underlying,
+ be_openssl_readcb, be_openssl_writecb, be_openssl_eventcb,
+ bev_ssl);
+ return 0;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int rpending=0, wpending=0, r1=0, r2=0;
+
+ if (event_initialized(&bev->ev_read)) {
+ rpending = event_pending(&bev->ev_read, EV_READ, NULL);
+ wpending = event_pending(&bev->ev_write, EV_WRITE, NULL);
+
+ event_del(&bev->ev_read);
+ event_del(&bev->ev_write);
+ }
+
+ event_assign(&bev->ev_read, bev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE,
+ be_openssl_readeventcb, bev_ssl);
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE,
+ be_openssl_writeeventcb, bev_ssl);
+
+ if (rpending)
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ if (wpending)
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
+
+ return (r1<0 || r2<0) ? -1 : 0;
+ }
+}
+static int
+set_open_callbacks_auto(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ fd = be_openssl_auto_fd(bev_ssl, fd);
+ return set_open_callbacks(bev_ssl, fd);
+}
+
+static int
+do_handshake(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+
+ switch (bev_ssl->state) {
+ default:
+ case BUFFEREVENT_SSL_OPEN:
+ EVUTIL_ASSERT(0);
+ return -1;
+ case BUFFEREVENT_SSL_CONNECTING:
+ case BUFFEREVENT_SSL_ACCEPTING:
+ r = SSL_do_handshake(bev_ssl->ssl);
+ break;
+ }
+ decrement_buckets(bev_ssl);
+
+ if (r==1) {
+ int fd = event_get_fd(&bev_ssl->bev.bev.ev_read);
+ /* We're done! */
+ bev_ssl->state = BUFFEREVENT_SSL_OPEN;
+ set_open_callbacks(bev_ssl, fd); /* XXXX handle failure */
+ /* Call do_read and do_write as needed */
+ bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_CONNECTED, 0);
+ return 1;
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_WRITE:
+ if (!bev_ssl->underlying) {
+ stop_reading(bev_ssl);
+ return start_writing(bev_ssl);
+ }
+ return 0;
+ case SSL_ERROR_WANT_READ:
+ if (!bev_ssl->underlying) {
+ stop_writing(bev_ssl);
+ return start_reading(bev_ssl);
+ }
+ return 0;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_READING, err, r);
+ return -1;
+ }
+ }
+}
+
+static void
+be_openssl_handshakecb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ do_handshake(bev_ssl);/* XXX handle failure */
+}
+
+static void
+be_openssl_handshakeeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what & EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT, 0);
+ } else
+ do_handshake(bev_ssl);/* XXX handle failure */
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static int
+set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_setcb(bev_ssl->underlying,
+ be_openssl_handshakecb, be_openssl_handshakecb,
+ be_openssl_eventcb,
+ bev_ssl);
+ return do_handshake(bev_ssl);
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ if (event_initialized(&bev->ev_read)) {
+ event_del(&bev->ev_read);
+ event_del(&bev->ev_write);
+ }
+
+ event_assign(&bev->ev_read, bev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE,
+ be_openssl_handshakeeventcb, bev_ssl);
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE,
+ be_openssl_handshakeeventcb, bev_ssl);
+ if (fd >= 0)
+ bufferevent_enable(bev, bev->enabled);
+ return 0;
+ }
+}
+
+static int
+set_handshake_callbacks_auto(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ fd = be_openssl_auto_fd(bev_ssl, fd);
+ return set_handshake_callbacks(bev_ssl, fd);
+}
+
+int
+bufferevent_ssl_renegotiate(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ if (!bev_ssl)
+ return -1;
+ if (SSL_renegotiate(bev_ssl->ssl) < 0)
+ return -1;
+ bev_ssl->state = BUFFEREVENT_SSL_CONNECTING;
+ if (set_handshake_callbacks_auto(bev_ssl, -1) < 0)
+ return -1;
+ if (!bev_ssl->underlying)
+ return do_handshake(bev_ssl);
+ return 0;
+}
+
+static void
+be_openssl_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo, void *arg)
+{
+ struct bufferevent_openssl *bev_ssl = arg;
+ int r = 0;
+ /* XXX need to hold a reference here. */
+
+ if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN &&
+ cbinfo->orig_size == 0) {
+ r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write,
+ &bev_ssl->bev.bev.timeout_write);
+ }
+ /* XXX Handle r < 0 */
+ (void)r;
+}
+
+
+static int
+be_openssl_enable(struct bufferevent *bev, short events)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ int r1 = 0, r2 = 0;
+
+ if (events & EV_READ)
+ r1 = start_reading(bev_ssl);
+ if (events & EV_WRITE)
+ r2 = start_writing(bev_ssl);
+
+ if (bev_ssl->underlying) {
+ if (events & EV_READ)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ if (events & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ if (events & EV_READ)
+ consider_reading(bev_ssl);
+ if (events & EV_WRITE)
+ consider_writing(bev_ssl);
+ }
+ return (r1 < 0 || r2 < 0) ? -1 : 0;
+}
+
+static int
+be_openssl_disable(struct bufferevent *bev, short events)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (events & EV_READ)
+ stop_reading(bev_ssl);
+ if (events & EV_WRITE)
+ stop_writing(bev_ssl);
+
+ if (bev_ssl->underlying) {
+ if (events & EV_READ)
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ if (events & EV_WRITE)
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ }
+ return 0;
+}
+
+static void
+be_openssl_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ if (bev_ssl->underlying) {
+ if (BEV_UPCAST(bev_ssl->underlying)->refcnt < 2) {
+				event_warnx("BEV_OPT_CLOSE_ON_FREE set on a "
+				    "bufferevent with too few references");
+ } else {
+ bufferevent_free(bev_ssl->underlying);
+ /* We still have a reference to it, via our
+ * BIO. So we don't drop this. */
+ // bev_ssl->underlying = NULL;
+ }
+ }
+ } else {
+ if (bev_ssl->underlying) {
+ if (bev_ssl->underlying->errorcb == be_openssl_eventcb)
+ bufferevent_setcb(bev_ssl->underlying,
+ NULL,NULL,NULL,NULL);
+ bufferevent_unsuspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ }
+}
+
+static void
+be_openssl_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ if (! bev_ssl->underlying) {
+ evutil_socket_t fd = -1;
+ BIO *bio = SSL_get_wbio(bev_ssl->ssl);
+ if (bio)
+ fd = BIO_get_fd(bio, NULL);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+ }
+ SSL_free(bev_ssl->ssl);
+ }
+}
+
+static int
+be_openssl_adj_timeouts(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->underlying) {
+ return bufferevent_generic_adj_timeouts_(bev);
+ } else {
+ return bufferevent_generic_adj_existing_timeouts_(bev);
+ }
+}
+
+static int
+be_openssl_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode)
+{
+ /* XXXX Implement this. */
+ return 0;
+}
+
+static int
+be_openssl_ctrl(struct bufferevent *bev,
+ enum bufferevent_ctrl_op op, union bufferevent_ctrl_data *data)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ switch (op) {
+ case BEV_CTRL_SET_FD:
+ if (bev_ssl->underlying)
+ return -1;
+ {
+ BIO *bio;
+ bio = BIO_new_socket(data->fd, 0);
+ SSL_set_bio(bev_ssl->ssl, bio, bio);
+ }
+ if (bev_ssl->state == BUFFEREVENT_SSL_OPEN && data->fd >= 0)
+ return set_open_callbacks(bev_ssl, data->fd);
+ else {
+ return set_handshake_callbacks(bev_ssl, data->fd);
+ }
+ case BEV_CTRL_GET_FD:
+ data->fd = event_get_fd(&bev->ev_read);
+ return 0;
+ case BEV_CTRL_GET_UNDERLYING:
+ data->ptr = bev_ssl->underlying;
+ return 0;
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+}
+
+SSL *
+bufferevent_openssl_get_ssl(struct bufferevent *bufev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bufev);
+ if (!bev_ssl)
+ return NULL;
+ return bev_ssl->ssl;
+}
+
+static struct bufferevent *
+bufferevent_openssl_new_impl(struct event_base *base,
+ struct bufferevent *underlying,
+ evutil_socket_t fd,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ struct bufferevent_openssl *bev_ssl = NULL;
+ struct bufferevent_private *bev_p = NULL;
+ int tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ if (underlying != NULL && fd >= 0)
+ return NULL; /* Only one can be set. */
+
+ if (!(bev_ssl = mm_calloc(1, sizeof(struct bufferevent_openssl))))
+ goto err;
+
+ bev_p = &bev_ssl->bev;
+
+ if (bufferevent_init_common_(bev_p, base,
+ &bufferevent_ops_openssl, tmp_options) < 0)
+ goto err;
+
+ /* Don't explode if we decide to realloc a chunk we're writing from in
+ * the output buffer. */
+ SSL_set_mode(ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+
+ bev_ssl->underlying = underlying;
+ bev_ssl->ssl = ssl;
+
+ bev_ssl->outbuf_cb = evbuffer_add_cb(bev_p->bev.output,
+ be_openssl_outbuf_cb, bev_ssl);
+
+ if (options & BEV_OPT_THREADSAFE)
+ bufferevent_enable_locking_(&bev_ssl->bev.bev, NULL);
+
+ if (underlying) {
+ bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev);
+ bufferevent_incref_(underlying);
+ }
+
+ bev_ssl->state = state;
+ bev_ssl->last_write = -1;
+
+ init_bio_counts(bev_ssl);
+
+ switch (state) {
+ case BUFFEREVENT_SSL_ACCEPTING:
+ SSL_set_accept_state(bev_ssl->ssl);
+ if (set_handshake_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ case BUFFEREVENT_SSL_CONNECTING:
+ SSL_set_connect_state(bev_ssl->ssl);
+ if (set_handshake_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ case BUFFEREVENT_SSL_OPEN:
+ if (set_open_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ if (underlying) {
+ bufferevent_setwatermark(underlying, EV_READ, 0, 0);
+ bufferevent_enable(underlying, EV_READ|EV_WRITE);
+ if (state == BUFFEREVENT_SSL_OPEN)
+ bufferevent_suspend_read_(underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+
+ return &bev_ssl->bev.bev;
+err:
+ if (bev_ssl)
+ bufferevent_free(&bev_ssl->bev.bev);
+ return NULL;
+}
+
+struct bufferevent *
+bufferevent_openssl_filter_new(struct event_base *base,
+ struct bufferevent *underlying,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ /* We don't tell the BIO to close the bufferevent; we do it ourselves
+	 * in be_openssl_destruct. */
+ int close_flag = 0; /* options & BEV_OPT_CLOSE_ON_FREE; */
+ BIO *bio;
+ if (!underlying)
+ return NULL;
+ if (!(bio = BIO_new_bufferevent(underlying, close_flag)))
+ return NULL;
+
+ SSL_set_bio(ssl, bio, bio);
+
+ return bufferevent_openssl_new_impl(
+ base, underlying, -1, ssl, state, options);
+}
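+
+/* Illustrative sketch (kept disabled; not part of the original source) of
+ * the "filter" flavor described earlier: layer SSL over an existing
+ * bufferevent.  The SSL_CTX is assumed to have been configured by the
+ * caller; the "example_" names are hypothetical. */
+#if 0
+static struct bufferevent *
+example_ssl_over_bufferevent(struct event_base *base,
+    struct bufferevent *plain, SSL_CTX *ssl_ctx)
+{
+	SSL *ssl = SSL_new(ssl_ctx);
+	if (!ssl)
+		return NULL;
+	/* With BEV_OPT_CLOSE_ON_FREE, freeing the result also frees
+	 * 'plain' and the SSL. */
+	return bufferevent_openssl_filter_new(base, plain, ssl,
+	    BUFFEREVENT_SSL_CONNECTING, BEV_OPT_CLOSE_ON_FREE);
+}
+#endif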
+
+struct bufferevent *
+bufferevent_openssl_socket_new(struct event_base *base,
+ evutil_socket_t fd,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ /* Does the SSL already have an fd? */
+ BIO *bio = SSL_get_wbio(ssl);
+ long have_fd = -1;
+
+ if (bio)
+ have_fd = BIO_get_fd(bio, NULL);
+
+ if (have_fd >= 0) {
+ /* The SSL is already configured with an fd. */
+ if (fd < 0) {
+ /* We should learn the fd from the SSL. */
+ fd = (evutil_socket_t) have_fd;
+ } else if (have_fd == (long)fd) {
+ /* We already know the fd from the SSL; do nothing */
+ } else {
+ /* We specified an fd different from that of the SSL.
+ This is probably an error on our part. Fail. */
+ return NULL;
+ }
+ (void) BIO_set_close(bio, 0);
+ } else {
+ /* The SSL isn't configured with a BIO with an fd. */
+ if (fd >= 0) {
+ /* ... and we have an fd we want to use. */
+ bio = BIO_new_socket(fd, 0);
+ SSL_set_bio(ssl, bio, bio);
+ } else {
+ /* Leave the fd unset. */
+ }
+ }
+
+ return bufferevent_openssl_new_impl(
+ base, NULL, fd, ssl, state, options);
+}
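+
+/* Illustrative sketch (kept disabled; not part of the original source) of
+ * the "socket" flavor: hand the fd and the SSL to this constructor and let
+ * it drive the socket directly.  "fd" and "ssl_ctx" are assumed to come
+ * from the caller; passing fd == -1 and connecting later with
+ * bufferevent_socket_connect() also works. */
+#if 0
+static struct bufferevent *
+example_ssl_over_socket(struct event_base *base, evutil_socket_t fd,
+    SSL_CTX *ssl_ctx)
+{
+	SSL *ssl = SSL_new(ssl_ctx);
+	if (!ssl)
+		return NULL;
+	return bufferevent_openssl_socket_new(base, fd, ssl,
+	    BUFFEREVENT_SSL_CONNECTING, BEV_OPT_CLOSE_ON_FREE);
+}
+#endif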
+
+int
+bufferevent_openssl_get_allow_dirty_shutdown(struct bufferevent *bev)
+{
+ int allow_dirty_shutdown = -1;
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl)
+ allow_dirty_shutdown = bev_ssl->allow_dirty_shutdown;
+ BEV_UNLOCK(bev);
+ return allow_dirty_shutdown;
+}
+
+void
+bufferevent_openssl_set_allow_dirty_shutdown(struct bufferevent *bev,
+ int allow_dirty_shutdown)
+{
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl)
+ bev_ssl->allow_dirty_shutdown = !!allow_dirty_shutdown;
+ BEV_UNLOCK(bev);
+}
+
+unsigned long
+bufferevent_get_openssl_error(struct bufferevent *bev)
+{
+ unsigned long err = 0;
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl && bev_ssl->n_errors) {
+ err = bev_ssl->errors[--bev_ssl->n_errors];
+ }
+ BEV_UNLOCK(bev);
+ return err;
+}
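+
+/* Illustrative sketch (kept disabled; not part of the original source):
+ * draining the stored OpenSSL errors from a user event callback after a
+ * BEV_EVENT_ERROR.  "example_eventcb" is hypothetical. */
+#if 0
+static void
+example_eventcb(struct bufferevent *bev, short what, void *ctx)
+{
+	(void)ctx;
+	if (what & BEV_EVENT_ERROR) {
+		unsigned long err;
+		while ((err = bufferevent_get_openssl_error(bev)) != 0) {
+			fprintf(stderr, "ssl error: %s\n",
+			    ERR_error_string(err, NULL));
+		}
+	}
+}
+#endif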
diff --git a/libs/libevent/src/bufferevent_pair.c b/libs/libevent/src/bufferevent_pair.c
new file mode 100644
index 0000000000..d80e5f81d6
--- /dev/null
+++ b/libs/libevent/src/bufferevent_pair.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "defer-internal.h"
+#include "bufferevent-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+
+struct bufferevent_pair {
+ struct bufferevent_private bev;
+ struct bufferevent_pair *partner;
+ /* For ->destruct() lock checking */
+ struct bufferevent_pair *unlinked_partner;
+};
+
+
+/* Given a bufferevent that's really the embedded bev of a bufferevent_pair,
+ * return that bufferevent_pair.  Returns NULL otherwise. */
+static inline struct bufferevent_pair *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p;
+ if (bev->be_ops != &bufferevent_ops_pair)
+ return NULL;
+ bev_p = EVUTIL_UPCAST(bev, struct bufferevent_pair, bev.bev);
+ EVUTIL_ASSERT(bev_p->bev.bev.be_ops == &bufferevent_ops_pair);
+ return bev_p;
+}
+
+#define downcast(bev_pair) (&(bev_pair)->bev.bev)
+
+static inline void
+incref_and_lock(struct bufferevent *b)
+{
+ struct bufferevent_pair *bevp;
+ bufferevent_incref_and_lock_(b);
+ bevp = upcast(b);
+ if (bevp->partner)
+ bufferevent_incref_and_lock_(downcast(bevp->partner));
+}
+
+static inline void
+decref_and_unlock(struct bufferevent *b)
+{
+ struct bufferevent_pair *bevp = upcast(b);
+ if (bevp->partner)
+ bufferevent_decref_and_unlock_(downcast(bevp->partner));
+ bufferevent_decref_and_unlock_(b);
+}
+
+/* XXX Handle close */
+
+static void be_pair_outbuf_cb(struct evbuffer *,
+ const struct evbuffer_cb_info *, void *);
+
+static struct bufferevent_pair *
+bufferevent_pair_elt_new(struct event_base *base,
+ int options)
+{
+ struct bufferevent_pair *bufev;
+ if (! (bufev = mm_calloc(1, sizeof(struct bufferevent_pair))))
+ return NULL;
+ if (bufferevent_init_common_(&bufev->bev, base, &bufferevent_ops_pair,
+ options)) {
+ mm_free(bufev);
+ return NULL;
+ }
+ if (!evbuffer_add_cb(bufev->bev.bev.output, be_pair_outbuf_cb, bufev)) {
+ bufferevent_free(downcast(bufev));
+ return NULL;
+ }
+
+ bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev);
+
+ return bufev;
+}
+
+int
+bufferevent_pair_new(struct event_base *base, int options,
+ struct bufferevent *pair[2])
+{
+ struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
+ int tmp_options;
+
+ options |= BEV_OPT_DEFER_CALLBACKS;
+ tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ bufev1 = bufferevent_pair_elt_new(base, options);
+ if (!bufev1)
+ return -1;
+ bufev2 = bufferevent_pair_elt_new(base, tmp_options);
+ if (!bufev2) {
+ bufferevent_free(downcast(bufev1));
+ return -1;
+ }
+
+ if (options & BEV_OPT_THREADSAFE) {
+ /*XXXX check return */
+ bufferevent_enable_locking_(downcast(bufev2), bufev1->bev.lock);
+ }
+
+ bufev1->partner = bufev2;
+ bufev2->partner = bufev1;
+
+ evbuffer_freeze(downcast(bufev1)->input, 0);
+ evbuffer_freeze(downcast(bufev1)->output, 1);
+ evbuffer_freeze(downcast(bufev2)->input, 0);
+ evbuffer_freeze(downcast(bufev2)->output, 1);
+
+ pair[0] = downcast(bufev1);
+ pair[1] = downcast(bufev2);
+
+ return 0;
+}
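+
+/* Illustrative sketch (kept disabled; not part of the original source):
+ * after bufferevent_pair_new(), whatever one side writes is transferred to
+ * the other side's input buffer once the writer's EV_WRITE and the reader's
+ * EV_READ are enabled.  "example_pair_ping" is hypothetical. */
+#if 0
+static int
+example_pair_ping(struct event_base *base)
+{
+	struct bufferevent *pair[2];
+	if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) < 0)
+		return -1;
+	bufferevent_enable(pair[0], EV_READ|EV_WRITE);
+	bufferevent_enable(pair[1], EV_READ|EV_WRITE);
+	/* "ping" ends up in pair[1]'s input buffer. */
+	bufferevent_write(pair[0], "ping", 4);
+	return 0;
+}
+#endif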
+
+static void
+be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
+ int ignore_wm)
+{
+ size_t dst_size;
+ size_t n;
+
+ evbuffer_unfreeze(src->output, 1);
+ evbuffer_unfreeze(dst->input, 0);
+
+ if (dst->wm_read.high) {
+ dst_size = evbuffer_get_length(dst->input);
+ if (dst_size < dst->wm_read.high) {
+ n = dst->wm_read.high - dst_size;
+ evbuffer_remove_buffer(src->output, dst->input, n);
+ } else {
+ if (!ignore_wm)
+ goto done;
+ n = evbuffer_get_length(src->output);
+ evbuffer_add_buffer(dst->input, src->output);
+ }
+ } else {
+ n = evbuffer_get_length(src->output);
+ evbuffer_add_buffer(dst->input, src->output);
+ }
+
+ if (n) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(dst);
+
+ if (evbuffer_get_length(dst->output))
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(dst);
+ else
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(dst);
+ }
+
+ bufferevent_trigger_nolock_(dst, EV_READ, 0);
+ bufferevent_trigger_nolock_(src, EV_WRITE, 0);
+done:
+ evbuffer_freeze(src->output, 1);
+ evbuffer_freeze(dst->input, 0);
+}
+
+static inline int
+be_pair_wants_to_talk(struct bufferevent_pair *src,
+ struct bufferevent_pair *dst)
+{
+ return (downcast(src)->enabled & EV_WRITE) &&
+ (downcast(dst)->enabled & EV_READ) &&
+ !dst->bev.read_suspended &&
+ evbuffer_get_length(downcast(src)->output);
+}
+
+static void
+be_pair_outbuf_cb(struct evbuffer *outbuf,
+ const struct evbuffer_cb_info *info, void *arg)
+{
+ struct bufferevent_pair *bev_pair = arg;
+ struct bufferevent_pair *partner = bev_pair->partner;
+
+ incref_and_lock(downcast(bev_pair));
+
+ if (info->n_added > info->n_deleted && partner) {
+ /* We got more data. If the other side's reading, then
+ hand it over. */
+ if (be_pair_wants_to_talk(bev_pair, partner)) {
+ be_pair_transfer(downcast(bev_pair), downcast(partner), 0);
+ }
+ }
+
+ decref_and_unlock(downcast(bev_pair));
+}
+
+static int
+be_pair_enable(struct bufferevent *bufev, short events)
+{
+ struct bufferevent_pair *bev_p = upcast(bufev);
+ struct bufferevent_pair *partner = bev_p->partner;
+
+ incref_and_lock(bufev);
+
+ if (events & EV_READ) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bufev);
+ }
+ if ((events & EV_WRITE) && evbuffer_get_length(bufev->output))
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
+
+ /* We're starting to read! Does the other side have anything to write? */
+ if ((events & EV_READ) && partner &&
+ be_pair_wants_to_talk(partner, bev_p)) {
+ be_pair_transfer(downcast(partner), bufev, 0);
+ }
+ /* We're starting to write! Does the other side want to read? */
+ if ((events & EV_WRITE) && partner &&
+ be_pair_wants_to_talk(bev_p, partner)) {
+ be_pair_transfer(bufev, downcast(partner), 0);
+ }
+ decref_and_unlock(bufev);
+ return 0;
+}
+
+static int
+be_pair_disable(struct bufferevent *bev, short events)
+{
+ if (events & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ }
+ if (events & EV_WRITE) {
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ }
+ return 0;
+}
+
+static void
+be_pair_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+
+ if (bev_p->partner) {
+ bev_p->unlinked_partner = bev_p->partner;
+ bev_p->partner->partner = NULL;
+ bev_p->partner = NULL;
+ }
+}
+
+/* Free the *shared* lock in whichever member of the pair is destroyed last
+ * (the two bufferevents in a pair share a single lock). */
+static void
+be_pair_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+
+ /* Transfer ownership of the lock to the partner; otherwise we would use
+ * an already-freed lock while freeing the second bev, as in this example:
+ *
+ * bev1->own_lock = 1
+ * bev2->own_lock = 0
+ * bev2->lock = bev1->lock
+ *
+ * bufferevent_free(bev1) # refcnt == 0 -> unlink
+ * bufferevent_free(bev2) # refcnt == 0 -> unlink
+ *
+ * event_base_free() -> finalizers -> EVTHREAD_FREE_LOCK(bev1->lock)
+ * -> BEV_LOCK(bev2->lock) <-- already freed
+ *
+ * Where bev1 == pair[0], bev2 == pair[1].
+ */
+ if (bev_p->unlinked_partner && bev_p->bev.own_lock) {
+ bev_p->unlinked_partner->bev.own_lock = 1;
+ bev_p->bev.own_lock = 0;
+ }
+ bev_p->unlinked_partner = NULL;
+}
+
+static int
+be_pair_flush(struct bufferevent *bev, short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+ struct bufferevent *partner;
+
+ if (!bev_p->partner)
+ return -1;
+
+ if (mode == BEV_NORMAL)
+ return 0;
+
+ incref_and_lock(bev);
+
+ partner = downcast(bev_p->partner);
+
+ if ((iotype & EV_READ) != 0)
+ be_pair_transfer(partner, bev, 1);
+
+ if ((iotype & EV_WRITE) != 0)
+ be_pair_transfer(bev, partner, 1);
+
+ if (mode == BEV_FINISHED) {
+ bufferevent_run_eventcb_(partner, iotype|BEV_EVENT_EOF, 0);
+ }
+ decref_and_unlock(bev);
+ return 0;
+}
+
+struct bufferevent *
+bufferevent_pair_get_partner(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p;
+ struct bufferevent *partner = NULL;
+ bev_p = upcast(bev);
+ if (! bev_p)
+ return NULL;
+
+ incref_and_lock(bev);
+ if (bev_p->partner)
+ partner = downcast(bev_p->partner);
+ decref_and_unlock(bev);
+ return partner;
+}
+
+const struct bufferevent_ops bufferevent_ops_pair = {
+ "pair_elt",
+ evutil_offsetof(struct bufferevent_pair, bev.bev),
+ be_pair_enable,
+ be_pair_disable,
+ be_pair_unlink,
+ be_pair_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_pair_flush,
+ NULL, /* ctrl */
+};
diff --git a/libs/libevent/src/bufferevent_ratelim.c b/libs/libevent/src/bufferevent_ratelim.c
new file mode 100644
index 0000000000..bde192021b
--- /dev/null
+++ b/libs/libevent/src/bufferevent_ratelim.c
@@ -0,0 +1,1092 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/buffer.h"
+
+#include "ratelim-internal.h"
+
+#include "bufferevent-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "event-internal.h"
+
+int
+ev_token_bucket_init_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick,
+ int reinitialize)
+{
+ if (reinitialize) {
+ /* on reinitialization, we only clip downwards, since we've
+ already used who-knows-how-much bandwidth this tick. We
+ leave "last_updated" as it is; the next update will add the
+ appropriate amount of bandwidth to the bucket.
+ */
+ if (bucket->read_limit > (ev_int64_t) cfg->read_maximum)
+ bucket->read_limit = cfg->read_maximum;
+ if (bucket->write_limit > (ev_int64_t) cfg->write_maximum)
+ bucket->write_limit = cfg->write_maximum;
+ } else {
+ bucket->read_limit = cfg->read_rate;
+ bucket->write_limit = cfg->write_rate;
+ bucket->last_updated = current_tick;
+ }
+ return 0;
+}
+
+int
+ev_token_bucket_update_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick)
+{
+ /* It's okay if the tick number overflows, since we'll just
+ * wrap around when we do the unsigned subtraction. */
+ unsigned n_ticks = current_tick - bucket->last_updated;
+
+ /* Make sure some ticks actually happened, and that time didn't
+ * roll back. */
+ if (n_ticks == 0 || n_ticks > INT_MAX)
+ return 0;
+
+ /* Naively, we would say
+ bucket->limit += n_ticks * cfg->rate;
+
+ if (bucket->limit > cfg->maximum)
+ bucket->limit = cfg->maximum;
+
+ But we're worried about overflow, so we do it like this:
+ */
+
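+ /* Illustrative numbers (assumed for the sake of example): with
+ * read_maximum = 10, read_limit = 4, read_rate = 3 and n_ticks = 5, the
+ * naive sum 4 + 5*3 = 19 would overshoot the maximum, and for large
+ * enough values the product n_ticks * rate could overflow.  The division
+ * test below, (10 - 4) / 5 = 1 < 3, detects this case and clamps straight
+ * to the maximum without ever forming the product. */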
+ if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate)
+ bucket->read_limit = cfg->read_maximum;
+ else
+ bucket->read_limit += n_ticks * cfg->read_rate;
+
+
+ if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate)
+ bucket->write_limit = cfg->write_maximum;
+ else
+ bucket->write_limit += n_ticks * cfg->write_rate;
+
+
+ bucket->last_updated = current_tick;
+
+ return 1;
+}
+
+static inline void
+bufferevent_update_buckets(struct bufferevent_private *bev)
+{
+ /* Must hold lock on bev. */
+ struct timeval now;
+ unsigned tick;
+ event_base_gettimeofday_cached(bev->bev.ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now, bev->rate_limiting->cfg);
+ if (tick != bev->rate_limiting->limit.last_updated)
+ ev_token_bucket_update_(&bev->rate_limiting->limit,
+ bev->rate_limiting->cfg, tick);
+}
+
+ev_uint32_t
+ev_token_bucket_get_tick_(const struct timeval *tv,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ /* This computation uses two multiplies and a divide. We could do
+ * fewer if we knew that the tick length was an integer number of
+ * seconds, or if we knew it divided evenly into a second. We should
+ * investigate that more.
+ */
+
+ /* We cast to an ev_uint64_t first, since we don't want to overflow
+ * before we do the final divide. */
+ ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
+ return (unsigned)(msec / cfg->msec_per_tick);
+}
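+
+/* Example (illustrative, with assumed numbers): for a 100-msec tick
+ * (cfg->msec_per_tick == 100) and tv = 2 sec + 530000 usec, msec is
+ * 2*1000 + 530000/1000 = 2530, so the function returns tick 2530/100 = 25.
+ */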
+
+struct ev_token_bucket_cfg *
+ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
+ size_t write_rate, size_t write_burst,
+ const struct timeval *tick_len)
+{
+ struct ev_token_bucket_cfg *r;
+ struct timeval g;
+ if (! tick_len) {
+ g.tv_sec = 1;
+ g.tv_usec = 0;
+ tick_len = &g;
+ }
+ if (read_rate > read_burst || write_rate > write_burst ||
+ read_rate < 1 || write_rate < 1)
+ return NULL;
+ if (read_rate > EV_RATE_LIMIT_MAX ||
+ write_rate > EV_RATE_LIMIT_MAX ||
+ read_burst > EV_RATE_LIMIT_MAX ||
+ write_burst > EV_RATE_LIMIT_MAX)
+ return NULL;
+ r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
+ if (!r)
+ return NULL;
+ r->read_rate = read_rate;
+ r->write_rate = write_rate;
+ r->read_maximum = read_burst;
+ r->write_maximum = write_burst;
+ memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
+ r->msec_per_tick = (tick_len->tv_sec * 1000) +
+ (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000;
+ return r;
+}
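+
+/* Example (illustrative sketch): limiting a single bufferevent to roughly
+ * 32 KiB/s with a 64 KiB burst in each direction, refilled once per second
+ * (a NULL tick_len selects the 1-second default above).  "bev" is an
+ * assumed placeholder for an existing bufferevent; the numbers are
+ * arbitrary.
+ *
+ *   struct ev_token_bucket_cfg *cfg =
+ *       ev_token_bucket_cfg_new(32768, 65536, 32768, 65536, NULL);
+ *   if (cfg)
+ *       bufferevent_set_rate_limit(bev, cfg);
+ *
+ * The cfg is stored by pointer (see bufferevent_set_rate_limit() below),
+ * so it must outlive every bufferevent that uses it; release it later with
+ * ev_token_bucket_cfg_free().
+ */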
+
+void
+ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
+{
+ mm_free(cfg);
+}
+
+/* Default values for max_single_read & max_single_write variables. */
+#define MAX_SINGLE_READ_DEFAULT 16384
+#define MAX_SINGLE_WRITE_DEFAULT 16384
+
+#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
+#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
+
+static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
+
+/** Helper: figure out the maximum amount we should write if is_write is
+ true, or the maximum amount we should read otherwise. Return that maximum, or
+ 0 if our bucket is wholly exhausted.
+ */
+static inline ev_ssize_t
+bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
+{
+ /* needs lock on bev. */
+ ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;
+
+#define LIM(x) \
+ (is_write ? (x).write_limit : (x).read_limit)
+
+#define GROUP_SUSPENDED(g) \
+ (is_write ? (g)->write_suspended : (g)->read_suspended)
+
+ /* Sets max_so_far to MIN(x, max_so_far) */
+#define CLAMPTO(x) \
+ do { \
+ if (max_so_far > (x)) \
+ max_so_far = (x); \
+ } while (0);
+
+ if (!bev->rate_limiting)
+ return max_so_far;
+
+ /* If rate-limiting is enabled at all, update the appropriate
+ bucket, and take the smaller of our rate limit and the group
+ rate limit.
+ */
+
+ if (bev->rate_limiting->cfg) {
+ bufferevent_update_buckets(bev);
+ max_so_far = LIM(bev->rate_limiting->limit);
+ }
+ if (bev->rate_limiting->group) {
+ struct bufferevent_rate_limit_group *g =
+ bev->rate_limiting->group;
+ ev_ssize_t share;
+ LOCK_GROUP(g);
+ if (GROUP_SUSPENDED(g)) {
+ /* We can get here if we failed to lock this
+ * particular bufferevent while suspending the whole
+ * group. */
+ if (is_write)
+ bufferevent_suspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ else
+ bufferevent_suspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ share = 0;
+ } else {
+ /* XXXX probably we should divide among the active
+ * members, not the total members. */
+ share = LIM(g->rate_limit) / g->n_members;
+ if (share < g->min_share)
+ share = g->min_share;
+ }
+ UNLOCK_GROUP(g);
+ CLAMPTO(share);
+ }
+
+ if (max_so_far < 0)
+ max_so_far = 0;
+ return max_so_far;
+}
+
+ev_ssize_t
+bufferevent_get_read_max_(struct bufferevent_private *bev)
+{
+ return bufferevent_get_rlim_max_(bev, 0);
+}
+
+ev_ssize_t
+bufferevent_get_write_max_(struct bufferevent_private *bev)
+{
+ return bufferevent_get_rlim_max_(bev, 1);
+}
+
+int
+bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
+{
+ /* XXXXX Make sure all users of this function check its return value */
+ int r = 0;
+ /* need to hold lock on bev */
+ if (!bev->rate_limiting)
+ return 0;
+
+ if (bev->rate_limiting->cfg) {
+ bev->rate_limiting->limit.read_limit -= bytes;
+ if (bev->rate_limiting->limit.read_limit <= 0) {
+ bufferevent_suspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ if (event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (bev->read_suspended & BEV_SUSPEND_BW) {
+ if (!(bev->write_suspended & BEV_SUSPEND_BW))
+ event_del(&bev->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ }
+ }
+
+ if (bev->rate_limiting->group) {
+ LOCK_GROUP(bev->rate_limiting->group);
+ bev->rate_limiting->group->rate_limit.read_limit -= bytes;
+ bev->rate_limiting->group->total_read += bytes;
+ if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
+ bev_group_suspend_reading_(bev->rate_limiting->group);
+ } else if (bev->rate_limiting->group->read_suspended) {
+ bev_group_unsuspend_reading_(bev->rate_limiting->group);
+ }
+ UNLOCK_GROUP(bev->rate_limiting->group);
+ }
+
+ return r;
+}
+
+int
+bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
+{
+ /* XXXXX Make sure all users of this function check its return value */
+ int r = 0;
+ /* need to hold lock */
+ if (!bev->rate_limiting)
+ return 0;
+
+ if (bev->rate_limiting->cfg) {
+ bev->rate_limiting->limit.write_limit -= bytes;
+ if (bev->rate_limiting->limit.write_limit <= 0) {
+ bufferevent_suspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ if (event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (bev->write_suspended & BEV_SUSPEND_BW) {
+ if (!(bev->read_suspended & BEV_SUSPEND_BW))
+ event_del(&bev->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ }
+ }
+
+ if (bev->rate_limiting->group) {
+ LOCK_GROUP(bev->rate_limiting->group);
+ bev->rate_limiting->group->rate_limit.write_limit -= bytes;
+ bev->rate_limiting->group->total_written += bytes;
+ if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
+ bev_group_suspend_writing_(bev->rate_limiting->group);
+ } else if (bev->rate_limiting->group->write_suspended) {
+ bev_group_unsuspend_writing_(bev->rate_limiting->group);
+ }
+ UNLOCK_GROUP(bev->rate_limiting->group);
+ }
+
+ return r;
+}
+
+/** Stop reading on every bufferevent in <b>g</b> */
+static int
+bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
+{
+ /* Needs group lock */
+ struct bufferevent_private *bev;
+ g->read_suspended = 1;
+ g->pending_unsuspend_read = 0;
+
+ /* Note that in this loop we call EVLOCK_TRY_LOCK_ instead of BEV_LOCK,
+ to prevent a deadlock. (Ordinarily, the group lock nests inside
+ the bufferevent locks. If we are unable to lock any individual
+ bufferevent, it will find out later when it looks at its limit
+ and sees that its group is suspended.)
+ */
+ LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_suspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ }
+ }
+ return 0;
+}
+
+/** Stop writing on every bufferevent in <b>g</b> */
+static int
+bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
+{
+ /* Needs group lock */
+ struct bufferevent_private *bev;
+ g->write_suspended = 1;
+ g->pending_unsuspend_write = 0;
+ LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_suspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ }
+ }
+ return 0;
+}
+
+/** Timer callback invoked on a single bufferevent with one or more exhausted
+ buckets when they are ready to refill. */
+static void
+bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
+{
+ unsigned tick;
+ struct timeval now;
+ struct bufferevent_private *bev = arg;
+ int again = 0;
+ BEV_LOCK(&bev->bev);
+ if (!bev->rate_limiting || !bev->rate_limiting->cfg) {
+ BEV_UNLOCK(&bev->bev);
+ return;
+ }
+
+ /* First, update the bucket */
+ event_base_gettimeofday_cached(bev->bev.ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now,
+ bev->rate_limiting->cfg);
+ ev_token_bucket_update_(&bev->rate_limiting->limit,
+ bev->rate_limiting->cfg,
+ tick);
+
+ /* Now unsuspend any read/write operations as appropriate. */
+ if ((bev->read_suspended & BEV_SUSPEND_BW)) {
+ if (bev->rate_limiting->limit.read_limit > 0)
+ bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ else
+ again = 1;
+ }
+ if ((bev->write_suspended & BEV_SUSPEND_BW)) {
+ if (bev->rate_limiting->limit.write_limit > 0)
+ bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ else
+ again = 1;
+ }
+ if (again) {
+ /* One or more of the buckets may need another refill if they
+ started negative.
+
+ XXXX if we need to be quiet for more ticks, we should
+ maybe figure out what timeout we really want.
+ */
+ /* XXXX Handle event_add failure somehow */
+ event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout);
+ }
+ BEV_UNLOCK(&bev->bev);
+}
+
+/** Helper: grab a random element from a bufferevent group.
+ *
+ * Requires that we hold the lock on the group.
+ */
+static struct bufferevent_private *
+bev_group_random_element_(struct bufferevent_rate_limit_group *group)
+{
+ int which;
+ struct bufferevent_private *bev;
+
+ /* requires group lock */
+
+ if (!group->n_members)
+ return NULL;
+
+ EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
+
+ which = evutil_weakrand_range_(&group->weakrand_seed, group->n_members);
+
+ bev = LIST_FIRST(&group->members);
+ while (which--)
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group);
+
+ return bev;
+}
+
+/** Iterate over the elements of a rate-limiting group 'g' with a random
+ starting point, assigning each to the variable 'bev', and executing the
+ block 'block'.
+
+ We do this in a half-baked effort to get fairness among group members.
+ XXX Round-robin or some kind of priority queue would be even more fair.
+ */
+#define FOREACH_RANDOM_ORDER(block) \
+ do { \
+ first = bev_group_random_element_(g); \
+ for (bev = first; bev != LIST_END(&g->members); \
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
+ block ; \
+ } \
+ for (bev = LIST_FIRST(&g->members); bev && bev != first; \
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
+ block ; \
+ } \
+ } while (0)
+
+static void
+bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
+{
+ int again = 0;
+ struct bufferevent_private *bev, *first;
+
+ g->read_suspended = 0;
+ FOREACH_RANDOM_ORDER({
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_unsuspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ } else {
+ again = 1;
+ }
+ });
+ g->pending_unsuspend_read = again;
+}
+
+static void
+bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
+{
+ int again = 0;
+ struct bufferevent_private *bev, *first;
+ g->write_suspended = 0;
+
+ FOREACH_RANDOM_ORDER({
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_unsuspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ } else {
+ again = 1;
+ }
+ });
+ g->pending_unsuspend_write = again;
+}
+
+/** Callback invoked every tick to add more elements to the group bucket
+ and unsuspend group members as needed.
+ */
+static void
+bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
+{
+ struct bufferevent_rate_limit_group *g = arg;
+ unsigned tick;
+ struct timeval now;
+
+ event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);
+
+ LOCK_GROUP(g);
+
+ tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg);
+ ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick);
+
+ if (g->pending_unsuspend_read ||
+ (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
+ bev_group_unsuspend_reading_(g);
+ }
+ if (g->pending_unsuspend_write ||
+ (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
+ bev_group_unsuspend_writing_(g);
+ }
+
+ /* XXXX Rather than waiting to the next tick to unsuspend stuff
+ * with pending_unsuspend_write/read, we should do it on the
+ * next iteration of the mainloop.
+ */
+
+ UNLOCK_GROUP(g);
+}
+
+int
+bufferevent_set_rate_limit(struct bufferevent *bev,
+ struct ev_token_bucket_cfg *cfg)
+{
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r = -1;
+ struct bufferevent_rate_limit *rlim;
+ struct timeval now;
+ ev_uint32_t tick;
+ int reinit = 0, suspended = 0;
+ /* XXX reference-count cfg */
+
+ BEV_LOCK(bev);
+
+ if (cfg == NULL) {
+ if (bevp->rate_limiting) {
+ rlim = bevp->rate_limiting;
+ rlim->cfg = NULL;
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ if (event_initialized(&rlim->refill_bucket_event))
+ event_del(&rlim->refill_bucket_event);
+ }
+ r = 0;
+ goto done;
+ }
+
+ event_base_gettimeofday_cached(bev->ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now, cfg);
+
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) {
+ /* no-op */
+ r = 0;
+ goto done;
+ }
+ if (bevp->rate_limiting == NULL) {
+ rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
+ if (!rlim)
+ goto done;
+ bevp->rate_limiting = rlim;
+ } else {
+ rlim = bevp->rate_limiting;
+ }
+ reinit = rlim->cfg != NULL;
+
+ rlim->cfg = cfg;
+ ev_token_bucket_init_(&rlim->limit, cfg, tick, reinit);
+
+ if (reinit) {
+ EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event));
+ event_del(&rlim->refill_bucket_event);
+ }
+ event_assign(&rlim->refill_bucket_event, bev->ev_base,
+ -1, EV_FINALIZE, bev_refill_callback_, bevp);
+
+ if (rlim->limit.read_limit > 0) {
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ } else {
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
+ suspended=1;
+ }
+ if (rlim->limit.write_limit > 0) {
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ } else {
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
+ suspended = 1;
+ }
+
+ if (suspended)
+ event_add(&rlim->refill_bucket_event, &cfg->tick_timeout);
+
+ r = 0;
+
+done:
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+struct bufferevent_rate_limit_group *
+bufferevent_rate_limit_group_new(struct event_base *base,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ struct bufferevent_rate_limit_group *g;
+ struct timeval now;
+ ev_uint32_t tick;
+
+ event_base_gettimeofday_cached(base, &now);
+ tick = ev_token_bucket_get_tick_(&now, cfg);
+
+ g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group));
+ if (!g)
+ return NULL;
+ memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
+ LIST_INIT(&g->members);
+
+ ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0);
+
+ event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE,
+ bev_group_refill_callback_, g);
+ /*XXXX handle event_add failure */
+ event_add(&g->master_refill_event, &cfg->tick_timeout);
+
+ EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ bufferevent_rate_limit_group_set_min_share(g, 64);
+
+ evutil_weakrand_seed_(&g->weakrand_seed,
+ (ev_uint32_t) ((now.tv_sec + now.tv_usec) + (ev_intptr_t)g));
+
+ return g;
+}
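+
+/* Example (illustrative sketch): sharing one aggregate budget of about
+ * 1 MiB/s across many bufferevents.  "base" and "bev" are assumed
+ * placeholders for an existing event_base and bufferevent.
+ *
+ *   struct ev_token_bucket_cfg *cfg =
+ *       ev_token_bucket_cfg_new(1048576, 1048576, 1048576, 1048576, NULL);
+ *   struct bufferevent_rate_limit_group *grp =
+ *       bufferevent_rate_limit_group_new(base, cfg);
+ *   bufferevent_add_to_rate_limit_group(bev, grp);
+ *   ...
+ *   bufferevent_remove_from_rate_limit_group(bev);
+ *   bufferevent_rate_limit_group_free(grp);
+ *
+ * The group copies the cfg (see the memcpy above), so the caller may free
+ * its copy afterwards with ev_token_bucket_cfg_free().
+ */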
+
+int
+bufferevent_rate_limit_group_set_cfg(
+ struct bufferevent_rate_limit_group *g,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ int same_tick;
+ if (!g || !cfg)
+ return -1;
+
+ LOCK_GROUP(g);
+ same_tick = evutil_timercmp(
+ &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
+ memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
+
+ if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum)
+ g->rate_limit.read_limit = cfg->read_maximum;
+ if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum)
+ g->rate_limit.write_limit = cfg->write_maximum;
+
+ if (!same_tick) {
+ /* This can cause a hiccup in the schedule */
+ event_add(&g->master_refill_event, &cfg->tick_timeout);
+ }
+
+ /* The new limits might force us to adjust min_share differently. */
+ bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share);
+
+ UNLOCK_GROUP(g);
+ return 0;
+}
+
+int
+bufferevent_rate_limit_group_set_min_share(
+ struct bufferevent_rate_limit_group *g,
+ size_t share)
+{
+ if (share > EV_SSIZE_MAX)
+ return -1;
+
+ g->configured_min_share = share;
+
+ /* Don't let the min share exceed the amount the bucket gains in one
+ * tick; IOW, at steady state, at least one connection can go per tick. */
+ if (share > g->rate_limit_cfg.read_rate)
+ share = g->rate_limit_cfg.read_rate;
+ if (share > g->rate_limit_cfg.write_rate)
+ share = g->rate_limit_cfg.write_rate;
+
+ g->min_share = share;
+ return 0;
+}
+
+void
+bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g)
+{
+ LOCK_GROUP(g);
+ EVUTIL_ASSERT(0 == g->n_members);
+ event_del(&g->master_refill_event);
+ UNLOCK_GROUP(g);
+ EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(g);
+}
+
+int
+bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
+ struct bufferevent_rate_limit_group *g)
+{
+ int wsuspend, rsuspend;
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ BEV_LOCK(bev);
+
+ if (!bevp->rate_limiting) {
+ struct bufferevent_rate_limit *rlim;
+ rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
+ if (!rlim) {
+ BEV_UNLOCK(bev);
+ return -1;
+ }
+ event_assign(&rlim->refill_bucket_event, bev->ev_base,
+ -1, EV_FINALIZE, bev_refill_callback_, bevp);
+ bevp->rate_limiting = rlim;
+ }
+
+ if (bevp->rate_limiting->group == g) {
+ BEV_UNLOCK(bev);
+ return 0;
+ }
+ if (bevp->rate_limiting->group)
+ bufferevent_remove_from_rate_limit_group(bev);
+
+ LOCK_GROUP(g);
+ bevp->rate_limiting->group = g;
+ ++g->n_members;
+ LIST_INSERT_HEAD(&g->members, bevp, rate_limiting->next_in_group);
+
+ rsuspend = g->read_suspended;
+ wsuspend = g->write_suspended;
+
+ UNLOCK_GROUP(g);
+
+ if (rsuspend)
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW_GROUP);
+ if (wsuspend)
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW_GROUP);
+
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+int
+bufferevent_remove_from_rate_limit_group(struct bufferevent *bev)
+{
+ return bufferevent_remove_from_rate_limit_group_internal_(bev, 1);
+}
+
+int
+bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev,
+ int unsuspend)
+{
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ BEV_LOCK(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->group) {
+ struct bufferevent_rate_limit_group *g =
+ bevp->rate_limiting->group;
+ LOCK_GROUP(g);
+ bevp->rate_limiting->group = NULL;
+ --g->n_members;
+ LIST_REMOVE(bevp, rate_limiting->next_in_group);
+ UNLOCK_GROUP(g);
+ }
+ if (unsuspend) {
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW_GROUP);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW_GROUP);
+ }
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+/* ===
+ * API functions to expose rate limits.
+ *
+ * Don't use these from inside Libevent; they're meant for use by the
+ * application.
+ * === */
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_read_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_get_read_limit(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
+ bufferevent_update_buckets(bevp);
+ r = bevp->rate_limiting->limit.read_limit;
+ } else {
+ r = EV_SSIZE_MAX;
+ }
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_write_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_get_write_limit(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
+ bufferevent_update_buckets(bevp);
+ r = bevp->rate_limiting->limit.write_limit;
+ } else {
+ r = EV_SSIZE_MAX;
+ }
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_set_max_single_read(struct bufferevent *bev, size_t size)
+{
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (size == 0 || size > EV_SSIZE_MAX)
+ bevp->max_single_read = MAX_SINGLE_READ_DEFAULT;
+ else
+ bevp->max_single_read = size;
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+int
+bufferevent_set_max_single_write(struct bufferevent *bev, size_t size)
+{
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (size == 0 || size > EV_SSIZE_MAX)
+ bevp->max_single_write = MAX_SINGLE_WRITE_DEFAULT;
+ else
+ bevp->max_single_write = size;
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+ev_ssize_t
+bufferevent_get_max_single_read(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+
+ BEV_LOCK(bev);
+ r = BEV_UPCAST(bev)->max_single_read;
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_single_write(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+
+ BEV_LOCK(bev);
+ r = BEV_UPCAST(bev)->max_single_write;
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_to_read(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ BEV_LOCK(bev);
+ r = bufferevent_get_read_max_(BEV_UPCAST(bev));
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_to_write(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ BEV_LOCK(bev);
+ r = bufferevent_get_write_max_(BEV_UPCAST(bev));
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+const struct ev_token_bucket_cfg *
+bufferevent_get_token_bucket_cfg(const struct bufferevent *bev) {
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+ struct ev_token_bucket_cfg *cfg;
+
+ BEV_LOCK(bev);
+
+ if (bufev_private->rate_limiting) {
+ cfg = bufev_private->rate_limiting->cfg;
+ } else {
+ cfg = NULL;
+ }
+
+ BEV_UNLOCK(bev);
+
+ return cfg;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_read_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_rate_limit_group_get_read_limit(
+ struct bufferevent_rate_limit_group *grp)
+{
+ ev_ssize_t r;
+ LOCK_GROUP(grp);
+ r = grp->rate_limit.read_limit;
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_write_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_rate_limit_group_get_write_limit(
+ struct bufferevent_rate_limit_group *grp)
+{
+ ev_ssize_t r;
+ LOCK_GROUP(grp);
+ r = grp->rate_limit.write_limit;
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+int
+bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
+ old_limit = bevp->rate_limiting->limit.read_limit;
+
+ new_limit = (bevp->rate_limiting->limit.read_limit -= decr);
+ if (old_limit > 0 && new_limit <= 0) {
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
+ if (event_add(&bevp->rate_limiting->refill_bucket_event,
+ &bevp->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (old_limit <= 0 && new_limit > 0) {
+ if (!(bevp->write_suspended & BEV_SUSPEND_BW))
+ event_del(&bevp->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ }
+
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr)
+{
+ /* XXXX this is mostly copy-and-paste from
+ * bufferevent_decrement_read_limit */
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
+ old_limit = bevp->rate_limiting->limit.write_limit;
+
+ new_limit = (bevp->rate_limiting->limit.write_limit -= decr);
+ if (old_limit > 0 && new_limit <= 0) {
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
+ if (event_add(&bevp->rate_limiting->refill_bucket_event,
+ &bevp->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (old_limit <= 0 && new_limit > 0) {
+ if (!(bevp->read_suspended & BEV_SUSPEND_BW))
+ event_del(&bevp->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ }
+
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_rate_limit_group_decrement_read(
+ struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ LOCK_GROUP(grp);
+ old_limit = grp->rate_limit.read_limit;
+ new_limit = (grp->rate_limit.read_limit -= decr);
+
+ if (old_limit > 0 && new_limit <= 0) {
+ bev_group_suspend_reading_(grp);
+ } else if (old_limit <= 0 && new_limit > 0) {
+ bev_group_unsuspend_reading_(grp);
+ }
+
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+int
+bufferevent_rate_limit_group_decrement_write(
+ struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ LOCK_GROUP(grp);
+ old_limit = grp->rate_limit.write_limit;
+ new_limit = (grp->rate_limit.write_limit -= decr);
+
+ if (old_limit > 0 && new_limit <= 0) {
+ bev_group_suspend_writing_(grp);
+ } else if (old_limit <= 0 && new_limit > 0) {
+ bev_group_unsuspend_writing_(grp);
+ }
+
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+void
+bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp,
+ ev_uint64_t *total_read_out, ev_uint64_t *total_written_out)
+{
+ EVUTIL_ASSERT(grp != NULL);
+ if (total_read_out)
+ *total_read_out = grp->total_read;
+ if (total_written_out)
+ *total_written_out = grp->total_written;
+}
+
+void
+bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp)
+{
+ grp->total_read = grp->total_written = 0;
+}
+
+int
+bufferevent_ratelim_init_(struct bufferevent_private *bev)
+{
+ bev->rate_limiting = NULL;
+ bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
+ bev->max_single_write = MAX_SINGLE_WRITE_DEFAULT;
+
+ return 0;
+}
diff --git a/libs/libevent/src/bufferevent_sock.c b/libs/libevent/src/bufferevent_sock.c
new file mode 100644
index 0000000000..a2b381ac4d
--- /dev/null
+++ b/libs/libevent/src/bufferevent_sock.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/event.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+#ifdef _WIN32
+#include "iocp-internal.h"
+#endif
+
+/* prototypes */
+static int be_socket_enable(struct bufferevent *, short);
+static int be_socket_disable(struct bufferevent *, short);
+static void be_socket_destruct(struct bufferevent *);
+static int be_socket_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
+static int be_socket_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+static void be_socket_setfd(struct bufferevent *, evutil_socket_t);
+
+const struct bufferevent_ops bufferevent_ops_socket = {
+ "socket",
+ evutil_offsetof(struct bufferevent_private, bev),
+ be_socket_enable,
+ be_socket_disable,
+ NULL, /* unlink */
+ be_socket_destruct,
+ bufferevent_generic_adj_existing_timeouts_,
+ be_socket_flush,
+ be_socket_ctrl,
+};
+
+const struct sockaddr*
+bufferevent_socket_get_conn_address_(struct bufferevent *bev)
+{
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ return (struct sockaddr *)&bev_p->conn_address;
+}
+static void
+bufferevent_socket_set_conn_address_fd(struct bufferevent_private *bev_p, int fd)
+{
+ socklen_t len = sizeof(bev_p->conn_address);
+
+ struct sockaddr *addr = (struct sockaddr *)&bev_p->conn_address;
+ if (addr->sa_family != AF_UNSPEC)
+ getpeername(fd, addr, &len);
+}
+static void
+bufferevent_socket_set_conn_address(struct bufferevent_private *bev_p,
+ struct sockaddr *addr, size_t addrlen)
+{
+ EVUTIL_ASSERT(addrlen <= sizeof(bev_p->conn_address));
+ memcpy(&bev_p->conn_address, addr, addrlen);
+}
+
+static void
+bufferevent_socket_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ if (cbinfo->n_added &&
+ (bufev->enabled & EV_WRITE) &&
+ !event_pending(&bufev->ev_write, EV_WRITE, NULL) &&
+ !bufev_p->write_suspended) {
+ /* Somebody added data to the buffer, and we would like to
+ * write, and we were not writing. So, start writing. */
+ if (bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1) {
+ /* Should we log this? */
+ }
+ }
+}
+
+static void
+bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ struct evbuffer *input;
+ int res = 0;
+ short what = BEV_EVENT_READING;
+ ev_ssize_t howmuch = -1, readmax=-1;
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (event == EV_TIMEOUT) {
+ /* Note that we only check for event==EV_TIMEOUT. If
+ * event==EV_TIMEOUT|EV_READ, we can safely ignore the
+ * timeout, since a read has occurred */
+ what |= BEV_EVENT_TIMEOUT;
+ goto error;
+ }
+
+ input = bufev->input;
+
+ /*
+ * If we have a high watermark configured then we don't want to
+ * read more data than would make us reach the watermark.
+ */
+ if (bufev->wm_read.high != 0) {
+ howmuch = bufev->wm_read.high - evbuffer_get_length(input);
+ /* we somehow lowered the watermark, stop reading */
+ if (howmuch <= 0) {
+ bufferevent_wm_suspend_read(bufev);
+ goto done;
+ }
+ }
+ readmax = bufferevent_get_read_max_(bufev_p);
+ if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
+ * uglifies this code. XXXX */
+ howmuch = readmax;
+ if (bufev_p->read_suspended)
+ goto done;
+
+ evbuffer_unfreeze(input, 0);
+ res = evbuffer_read(input, fd, (int)howmuch); /* XXXX evbuffer_read would do better to take and return ev_ssize_t */
+ evbuffer_freeze(input, 0);
+
+ if (res == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ goto reschedule;
+ /* error case */
+ what |= BEV_EVENT_ERROR;
+ } else if (res == 0) {
+ /* eof case */
+ what |= BEV_EVENT_EOF;
+ }
+
+ if (res <= 0)
+ goto error;
+
+ bufferevent_decrement_read_buckets_(bufev_p, res);
+
+ /* Invoke the user callback - must always be called last */
+ bufferevent_trigger_nolock_(bufev, EV_READ, 0);
+
+ goto done;
+
+ reschedule:
+ goto done;
+
+ error:
+ bufferevent_disable(bufev, EV_READ);
+ bufferevent_run_eventcb_(bufev, what, 0);
+
+ done:
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+static void
+bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ int res = 0;
+ short what = BEV_EVENT_WRITING;
+ int connected = 0;
+ ev_ssize_t atmost = -1;
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (event == EV_TIMEOUT) {
+ /* Note that we only check for event==EV_TIMEOUT. If
+ * event==EV_TIMEOUT|EV_WRITE, we can safely ignore the
+ * timeout, since a write has occurred */
+ what |= BEV_EVENT_TIMEOUT;
+ goto error;
+ }
+ if (bufev_p->connecting) {
+ int c = evutil_socket_finished_connecting_(fd);
+ /* we need to fake the error if the connection was refused
+ * immediately - usually connection to localhost on BSD */
+ if (bufev_p->connection_refused) {
+ bufev_p->connection_refused = 0;
+ c = -1;
+ }
+
+ if (c == 0)
+ goto done;
+
+ bufev_p->connecting = 0;
+ if (c < 0) {
+ event_del(&bufev->ev_write);
+ event_del(&bufev->ev_read);
+ bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR, 0);
+ goto done;
+ } else {
+ connected = 1;
+ bufferevent_socket_set_conn_address_fd(bufev_p, fd);
+#ifdef _WIN32
+ if (BEV_IS_ASYNC(bufev)) {
+ event_del(&bufev->ev_write);
+ bufferevent_async_set_connected_(bufev);
+ bufferevent_run_eventcb_(bufev,
+ BEV_EVENT_CONNECTED, 0);
+ goto done;
+ }
+#endif
+ bufferevent_run_eventcb_(bufev,
+ BEV_EVENT_CONNECTED, 0);
+ if (!(bufev->enabled & EV_WRITE) ||
+ bufev_p->write_suspended) {
+ event_del(&bufev->ev_write);
+ goto done;
+ }
+ }
+ }
+
+ atmost = bufferevent_get_write_max_(bufev_p);
+
+ if (bufev_p->write_suspended)
+ goto done;
+
+ if (evbuffer_get_length(bufev->output)) {
+ evbuffer_unfreeze(bufev->output, 1);
+ res = evbuffer_write_atmost(bufev->output, fd, atmost);
+ evbuffer_freeze(bufev->output, 1);
+ if (res == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ goto reschedule;
+ what |= BEV_EVENT_ERROR;
+ } else if (res == 0) {
+ /* eof case
+ XXXX Actually, a 0 on write doesn't indicate
+ an EOF. An ECONNRESET might be more typical.
+ */
+ what |= BEV_EVENT_EOF;
+ }
+ if (res <= 0)
+ goto error;
+
+ bufferevent_decrement_write_buckets_(bufev_p, res);
+ }
+
+ if (evbuffer_get_length(bufev->output) == 0) {
+ event_del(&bufev->ev_write);
+ }
+
+ /*
+ * Invoke the user callback if our buffer is drained or below the
+ * low watermark.
+ */
+ if (res || !connected) {
+ bufferevent_trigger_nolock_(bufev, EV_WRITE, 0);
+ }
+
+ goto done;
+
+ reschedule:
+ if (evbuffer_get_length(bufev->output) == 0) {
+ event_del(&bufev->ev_write);
+ }
+ goto done;
+
+ error:
+ bufferevent_disable(bufev, EV_WRITE);
+ bufferevent_run_eventcb_(bufev, what, 0);
+
+ done:
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+struct bufferevent *
+bufferevent_socket_new(struct event_base *base, evutil_socket_t fd,
+ int options)
+{
+ struct bufferevent_private *bufev_p;
+ struct bufferevent *bufev;
+
+#ifdef _WIN32
+ if (base && event_base_get_iocp_(base))
+ return bufferevent_async_new_(base, fd, options);
+#endif
+
+ if ((bufev_p = mm_calloc(1, sizeof(struct bufferevent_private)))== NULL)
+ return NULL;
+
+ if (bufferevent_init_common_(bufev_p, base, &bufferevent_ops_socket,
+ options) < 0) {
+ mm_free(bufev_p);
+ return NULL;
+ }
+ bufev = &bufev_p->bev;
+ evbuffer_set_flags(bufev->output, EVBUFFER_FLAG_DRAINS_TO_FD);
+
+ event_assign(&bufev->ev_read, bufev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
+ event_assign(&bufev->ev_write, bufev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
+
+ evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev);
+
+ evbuffer_freeze(bufev->input, 0);
+ evbuffer_freeze(bufev->output, 1);
+
+ return bufev;
+}
+
+int
+bufferevent_socket_connect(struct bufferevent *bev,
+ const struct sockaddr *sa, int socklen)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ evutil_socket_t fd;
+ int r = 0;
+ int result=-1;
+ int ownfd = 0;
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (!bufev_p)
+ goto done;
+
+ fd = bufferevent_getfd(bev);
+ if (fd < 0) {
+ if (!sa)
+ goto done;
+ fd = evutil_socket_(sa->sa_family,
+ SOCK_STREAM|EVUTIL_SOCK_NONBLOCK, 0);
+ if (fd < 0)
+ goto done;
+ ownfd = 1;
+ }
+ if (sa) {
+#ifdef _WIN32
+ if (bufferevent_async_can_connect_(bev)) {
+ bufferevent_setfd(bev, fd);
+ r = bufferevent_async_connect_(bev, fd, sa, socklen);
+ if (r < 0)
+ goto freesock;
+ bufev_p->connecting = 1;
+ result = 0;
+ goto done;
+ } else
+#endif
+ r = evutil_socket_connect_(&fd, sa, socklen);
+ if (r < 0)
+ goto freesock;
+ }
+#ifdef _WIN32
+ /* ConnectEx() isn't always around, even when IOCP is enabled.
+ * Here, we borrow the socket object's write handler to fall back
+ * on a non-blocking connect() when ConnectEx() is unavailable. */
+ if (BEV_IS_ASYNC(bev)) {
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bev);
+ }
+#endif
+ bufferevent_setfd(bev, fd);
+ if (r == 0) {
+ if (! be_socket_enable(bev, EV_WRITE)) {
+ bufev_p->connecting = 1;
+ result = 0;
+ goto done;
+ }
+ } else if (r == 1) {
+ /* The connect succeeded already. How very BSD of it. */
+ result = 0;
+ bufev_p->connecting = 1;
+ event_active(&bev->ev_write, EV_WRITE, 1);
+ } else {
+ /* The connect failed already. How very BSD of it. */
+ bufev_p->connection_refused = 1;
+ bufev_p->connecting = 1;
+ result = 0;
+ event_active(&bev->ev_write, EV_WRITE, 1);
+ }
+
+ goto done;
+
+freesock:
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ if (ownfd)
+ evutil_closesocket(fd);
+ /* do something about the error? */
+done:
+ bufferevent_decref_and_unlock_(bev);
+ return result;
+}
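+
+/* Example (illustrative sketch): connecting a socket bufferevent to
+ * 127.0.0.1:8080.  "base" and "event_cb" are assumed placeholders; the
+ * address and port are arbitrary.
+ *
+ *   struct sockaddr_in sin;
+ *   memset(&sin, 0, sizeof(sin));
+ *   sin.sin_family = AF_INET;
+ *   sin.sin_addr.s_addr = htonl(0x7f000001);
+ *   sin.sin_port = htons(8080);
+ *
+ *   struct bufferevent *bev =
+ *       bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+ *   bufferevent_setcb(bev, NULL, NULL, event_cb, NULL);
+ *   if (bufferevent_socket_connect(bev,
+ *           (struct sockaddr *)&sin, sizeof(sin)) < 0)
+ *       bufferevent_free(bev);
+ *
+ * event_cb later receives BEV_EVENT_CONNECTED or BEV_EVENT_ERROR.
+ */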
+
+static void
+bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r;
+ BEV_LOCK(bev);
+
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_LOOKUP);
+
+ if (result != 0) {
+ bev_p->dns_error = result;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ bufferevent_decref_and_unlock_(bev);
+ if (ai)
+ evutil_freeaddrinfo(ai);
+ return;
+ }
+
+ /* XXX use the other addrinfos? */
+ /* XXX use this return value */
+ bufferevent_socket_set_conn_address(bev_p, ai->ai_addr, (int)ai->ai_addrlen);
+ r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen);
+ (void)r;
+ bufferevent_decref_and_unlock_(bev);
+ evutil_freeaddrinfo(ai);
+}
+
+int
+bufferevent_socket_connect_hostname(struct bufferevent *bev,
+ struct evdns_base *evdns_base, int family, const char *hostname, int port)
+{
+ char portbuf[10];
+ struct evutil_addrinfo hint;
+ int err;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ if (family != AF_INET && family != AF_INET6 && family != AF_UNSPEC)
+ return -1;
+ if (port < 1 || port > 65535)
+ return -1;
+
+ memset(&hint, 0, sizeof(hint));
+ hint.ai_family = family;
+ hint.ai_protocol = IPPROTO_TCP;
+ hint.ai_socktype = SOCK_STREAM;
+
+ evutil_snprintf(portbuf, sizeof(portbuf), "%d", port);
+
+ BEV_LOCK(bev);
+ bev_p->dns_error = 0;
+
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_LOOKUP);
+
+ bufferevent_incref_(bev);
+ err = evutil_getaddrinfo_async_(evdns_base, hostname, portbuf,
+ &hint, bufferevent_connect_getaddrinfo_cb, bev);
+ BEV_UNLOCK(bev);
+
+ if (err == 0) {
+ return 0;
+ } else {
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_decref_(bev);
+ return -1;
+ }
+}
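+
+/* Example (illustrative sketch): connecting by hostname with asynchronous
+ * DNS resolution.  It assumes <event2/dns.h> is included and that "base"
+ * and "event_cb" already exist; the hostname is a placeholder.
+ *
+ *   struct evdns_base *dns = evdns_base_new(base, 1);
+ *   struct bufferevent *bev =
+ *       bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+ *   bufferevent_setcb(bev, NULL, NULL, event_cb, NULL);
+ *   bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
+ *       "www.example.com", 443);
+ *
+ * On failure, event_cb sees BEV_EVENT_ERROR and the DNS error (if any) is
+ * available via bufferevent_socket_get_dns_error().
+ */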
+
+int
+bufferevent_socket_get_dns_error(struct bufferevent *bev)
+{
+ int rv;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bev);
+ rv = bev_p->dns_error;
+ BEV_UNLOCK(bev);
+
+ return rv;
+}
+
+/*
+ * Create a new buffered event object.
+ *
+ * The read callback is invoked whenever we read new data.
+ * The write callback is invoked whenever the output buffer is drained.
+ * The error callback is invoked on a write/read error or on EOF.
+ *
+ * Both the read and write callbacks may be NULL. The error callback may
+ * not be NULL; it must always be provided.
+ */
+
+struct bufferevent *
+bufferevent_new(evutil_socket_t fd,
+ bufferevent_data_cb readcb, bufferevent_data_cb writecb,
+ bufferevent_event_cb eventcb, void *cbarg)
+{
+ struct bufferevent *bufev;
+
+ if (!(bufev = bufferevent_socket_new(NULL, fd, 0)))
+ return NULL;
+
+ bufferevent_setcb(bufev, readcb, writecb, eventcb, cbarg);
+
+ return bufev;
+}
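+
+/* Example (illustrative sketch of this legacy interface): it assumes the
+ * deprecated event_init() call has already set up the global event base,
+ * and that "fd" is a connected non-blocking socket.  handle_data() is a
+ * placeholder for application logic.
+ *
+ *   static void on_read(struct bufferevent *bev, void *ctx)
+ *   {
+ *       char buf[4096];
+ *       size_t n;
+ *       while ((n = bufferevent_read(bev, buf, sizeof(buf))) > 0)
+ *           handle_data(buf, n);
+ *   }
+ *   static void on_event(struct bufferevent *bev, short what, void *ctx)
+ *   {
+ *       if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
+ *           bufferevent_free(bev);
+ *   }
+ *
+ *   struct bufferevent *bev = bufferevent_new(fd, on_read, NULL, on_event, NULL);
+ *   bufferevent_enable(bev, EV_READ);
+ */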
+
+
+static int
+be_socket_enable(struct bufferevent *bufev, short event)
+{
+ if (event & EV_READ &&
+ bufferevent_add_event_(&bufev->ev_read, &bufev->timeout_read) == -1)
+ return -1;
+ if (event & EV_WRITE &&
+ bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1)
+ return -1;
+ return 0;
+}
+
+static int
+be_socket_disable(struct bufferevent *bufev, short event)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (event & EV_READ) {
+ if (event_del(&bufev->ev_read) == -1)
+ return -1;
+ }
+ /* Don't actually disable the write if we are trying to connect. */
+ if ((event & EV_WRITE) && ! bufev_p->connecting) {
+ if (event_del(&bufev->ev_write) == -1)
+ return -1;
+ }
+ return 0;
+}
+
+static void
+be_socket_destruct(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ evutil_socket_t fd;
+ EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
+
+ fd = event_get_fd(&bufev->ev_read);
+
+ if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0)
+ EVUTIL_CLOSESOCKET(fd);
+}
+
+static int
+be_socket_flush(struct bufferevent *bev, short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ return 0;
+}
+
+
+static void
+be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd)
+{
+ BEV_LOCK(bufev);
+ EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
+
+ event_del(&bufev->ev_read);
+ event_del(&bufev->ev_write);
+
+ event_assign(&bufev->ev_read, bufev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
+ event_assign(&bufev->ev_write, bufev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
+
+ if (fd >= 0)
+ bufferevent_enable(bufev, bufev->enabled);
+
+ BEV_UNLOCK(bufev);
+}
+
+/* XXXX Should non-socket bufferevents support this? */
+int
+bufferevent_priority_set(struct bufferevent *bufev, int priority)
+{
+ int r = -1;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ if (bufev->be_ops != &bufferevent_ops_socket)
+ goto done;
+
+ if (event_priority_set(&bufev->ev_read, priority) == -1)
+ goto done;
+ if (event_priority_set(&bufev->ev_write, priority) == -1)
+ goto done;
+
+ event_deferred_cb_set_priority_(&bufev_p->deferred, priority);
+
+ r = 0;
+done:
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+/* XXXX Should non-socket bufferevents support this? */
+int
+bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
+{
+ int res = -1;
+
+ BEV_LOCK(bufev);
+ if (bufev->be_ops != &bufferevent_ops_socket)
+ goto done;
+
+ bufev->ev_base = base;
+
+ res = event_base_set(base, &bufev->ev_read);
+ if (res == -1)
+ goto done;
+
+ res = event_base_set(base, &bufev->ev_write);
+done:
+ BEV_UNLOCK(bufev);
+ return res;
+}
+
+static int
+be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ switch (op) {
+ case BEV_CTRL_SET_FD:
+ be_socket_setfd(bev, data->fd);
+ return 0;
+ case BEV_CTRL_GET_FD:
+ data->fd = event_get_fd(&bev->ev_read);
+ return 0;
+ case BEV_CTRL_GET_UNDERLYING:
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+}
diff --git a/libs/libevent/src/changelist-internal.h b/libs/libevent/src/changelist-internal.h
new file mode 100644
index 0000000000..98fc52aebf
--- /dev/null
+++ b/libs/libevent/src/changelist-internal.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CHANGELIST_INTERNAL_H_INCLUDED_
+#define CHANGELIST_INTERNAL_H_INCLUDED_
+
+/*
+ A "changelist" is a list of all the fd status changes that should be made
+ between calls to the backend's dispatch function. There are a few reasons
+ that a backend would want to queue changes like this rather than processing
+ them immediately.
+
+ 1) Sometimes applications will add and delete the same event more than
+ once between calls to dispatch. Processing these changes immediately
+ is needless, and potentially expensive (especially if we're on a system
+ that makes one syscall per changed event).
+
+ 2) Sometimes we can coalesce multiple changes on the same fd into a single
+ syscall if we know about them in advance. For example, epoll can do an
+ add and a delete at the same time, but only if we have found out about
+ both of them before we tell epoll.
+
+ 3) Sometimes adding an event that we immediately delete can cause
+ unintended consequences: in kqueue, this makes pending events get
+ reported spuriously.
+ */
+
+#include "event2/util.h"
+
+/** Represents a pending change to the events watched on an fd or signal. */
+struct event_change {
+ /** The fd or signal whose events are to be changed */
+ evutil_socket_t fd;
+ /* The events that were enabled on the fd before any of these changes
+ were made. May include EV_READ or EV_WRITE. */
+ short old_events;
+
+ /* The changes that we want to make in reading and writing on this fd.
+ * If this is a signal, then read_change has EV_CHANGE_SIGNAL set,
+ * and write_change is unused. */
+ ev_uint8_t read_change;
+ ev_uint8_t write_change;
+ ev_uint8_t close_change;
+};
+
+/* Flags for read_change and write_change. */
+
+/* If set, add the event. */
+#define EV_CHANGE_ADD 0x01
+/* If set, delete the event. Exclusive with EV_CHANGE_ADD */
+#define EV_CHANGE_DEL 0x02
+/* If set, this event refers to a signal, not an fd. */
+#define EV_CHANGE_SIGNAL EV_SIGNAL
+/* Set for persistent events. Currently not used. */
+#define EV_CHANGE_PERSIST EV_PERSIST
+/* Set for adding edge-triggered events. */
+#define EV_CHANGE_ET EV_ET
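+
+/* Illustrative example (not part of the API): to record "start watching fd 7
+ * for edge-triggered reads" on an fd that had nothing enabled, a change entry
+ * would look roughly like
+ *
+ *	struct event_change ch;
+ *	ch.fd = 7;
+ *	ch.old_events = 0;
+ *	ch.read_change = EV_CHANGE_ADD | EV_CHANGE_ET;
+ *	ch.write_change = 0;
+ *	ch.close_change = 0;
+ *
+ * In practice, entries like this are queued by event_changelist_add_() and
+ * event_changelist_del_() below rather than filled in by hand. */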
+
+/* The value of fdinfo_size that a backend should use if it is letting
+ * changelist handle its add and delete functions. */
+#define EVENT_CHANGELIST_FDINFO_SIZE sizeof(int)
+
+/** Set up the data fields in a changelist. */
+void event_changelist_init_(struct event_changelist *changelist);
+/** Remove every change in the changelist, and make corresponding changes
+ * in the event maps in the base. This function is generally used right
+ * after making all the changes in the changelist. */
+void event_changelist_remove_all_(struct event_changelist *changelist,
+ struct event_base *base);
+/** Free all memory held in a changelist. */
+void event_changelist_freemem_(struct event_changelist *changelist);
+
+/** Implementation of eventop_add that queues the event in a changelist. */
+int event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p);
+/** Implementation of eventop_del that queues the event in a changelist. */
+int event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p);
+
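+/* A minimal sketch (not part of this header) of how a backend opts in to the
+ * changelist: point the eventop add/del hooks at the helpers above, and drain
+ * the queued changes at the top of the dispatch function.  The field names
+ * assume the struct eventop declared in event-internal.h; the "example_*"
+ * functions are hypothetical.
+ *
+ *	static const struct eventop exampleops = {
+ *		.name = "example (with changelist)",
+ *		.init = example_init,
+ *		.add = event_changelist_add_,	// queue; no syscall per add
+ *		.del = event_changelist_del_,	// queue; no syscall per del
+ *		.dispatch = example_dispatch,	// applies queued changes, then waits
+ *		.dealloc = example_dealloc,
+ *		.need_reinit = 1,
+ *		.features = 0,
+ *		.fdinfo_len = EVENT_CHANGELIST_FDINFO_SIZE,
+ *	};
+ */
+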
+#endif
diff --git a/libs/libevent/src/compat/sys/queue.h b/libs/libevent/src/compat/sys/queue.h
new file mode 100644
index 0000000000..c387bdcf50
--- /dev/null
+++ b/libs/libevent/src/compat/sys/queue.h
@@ -0,0 +1,488 @@
+/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef SYS_QUEUE_H__
+#define SYS_QUEUE_H__
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but end-of-list
+ * detection is more complex.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
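+
+/* Example use of the tail-queue macros defined below (a sketch; "struct foo",
+ * "foolist", and the field name "link" are hypothetical; see also queue(3)).
+ * The other four structures follow the same HEAD/ENTRY/INSERT/FOREACH pattern
+ * under their own prefixes (SLIST_, LIST_, SIMPLEQ_, CIRCLEQ_).
+ *
+ *	struct foo {
+ *		int value;
+ *		TAILQ_ENTRY(foo) link;		// linkage embedded in the element
+ *	};
+ *	TAILQ_HEAD(foolist, foo) head = TAILQ_HEAD_INITIALIZER(head);
+ *
+ *	struct foo one = { 1 }, two = { 2 }, *it;
+ *	TAILQ_INSERT_TAIL(&head, &one, link);	// O(1) append
+ *	TAILQ_INSERT_TAIL(&head, &two, link);
+ *	TAILQ_FOREACH(it, &head, link)
+ *		printf("%d\n", it->value);	// prints 1, then 2
+ */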
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#ifndef _WIN32
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+#endif
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = SLIST_FIRST(head); \
+ (var) != SLIST_END(head); \
+ (var) = SLIST_NEXT(var, field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_INIT(head) do {						\
+	SLIST_FIRST(head) = SLIST_END(head);				\
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods
+ */
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field) \
+ for((var) = LIST_FIRST(head); \
+	    (var) != LIST_END(head);					\
+ (var) = LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ LIST_FIRST(head) = LIST_END(head); \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for((var) = SIMPLEQ_FIRST(head); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) NULL
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) \
+ (TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field) \
+ for((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for((var) = TAILQ_LAST(head, headname); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_END(head) ((void *)(head))
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = CIRCLEQ_FIRST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_NEXT(var, field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = CIRCLEQ_LAST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_last = (elm2);				\
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_first = (elm2);				\
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+} while (0)
+
+#endif /* !SYS_QUEUE_H__ */
diff --git a/libs/libevent/src/defer-internal.h b/libs/libevent/src/defer-internal.h
new file mode 100644
index 0000000000..e3c7d7da5b
--- /dev/null
+++ b/libs/libevent/src/defer-internal.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef DEFER_INTERNAL_H_INCLUDED_
+#define DEFER_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/queue.h>
+
+struct event_callback;
+typedef void (*deferred_cb_fn)(struct event_callback *, void *);
+
+/**
+ Initialize an empty, non-pending event_callback.
+
+ @param deferred The struct event_callback structure to initialize.
+ @param priority The priority that the callback should run at.
+ @param cb The function to run when the struct event_callback executes.
+ @param arg The function's second argument.
+ */
+void event_deferred_cb_init_(struct event_callback *, ev_uint8_t, deferred_cb_fn, void *);
+/**
+ Change the priority of a non-pending event_callback.
+ */
+void event_deferred_cb_set_priority_(struct event_callback *, ev_uint8_t);
+/**
+ Cancel a struct event_callback if it is currently scheduled in an event_base.
+ */
+void event_deferred_cb_cancel_(struct event_base *, struct event_callback *);
+/**
+ Activate a struct event_callback if it is not currently scheduled in an event_base.
+
+ Return true if it was not previously scheduled.
+ */
+int event_deferred_cb_schedule_(struct event_base *, struct event_callback *);
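+
+/* Typical use (a sketch; "my_deferred_cb", "my_arg", and "base" are
+ * hypothetical names):
+ *
+ *	static void my_deferred_cb(struct event_callback *cb, void *arg)
+ *	{
+ *		// runs later, from the event loop, at the priority given below
+ *	}
+ *
+ *	struct event_callback dcb;
+ *	event_deferred_cb_init_(&dcb, 0, my_deferred_cb, my_arg);
+ *	event_deferred_cb_schedule_(base, &dcb);
+ */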
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DEFER_INTERNAL_H_INCLUDED_ */
+
diff --git a/libs/libevent/src/epolltable-internal.h b/libs/libevent/src/epolltable-internal.h
new file mode 100644
index 0000000000..da30e0973a
--- /dev/null
+++ b/libs/libevent/src/epolltable-internal.h
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EPOLLTABLE_INTERNAL_H_INCLUDED_
+#define EPOLLTABLE_INTERNAL_H_INCLUDED_
+
+/*
+ Here are the values we're masking off to decide what operations to do.
+  Note that the table index packs the close, read, and write changes together
+  with the old EV_READ|EV_WRITE|EV_CLOSED state.
+
+ Note also that this table is a little sparse, since ADD+DEL is
+ nonsensical ("xxx" in the list below.)
+
+  Note also that we are shifting old_events by only 5 bits, since
+ EV_READ is 2 and EV_WRITE is 4.
+
+ The table was auto-generated with a python script, according to this
+ pseudocode:[*0]
+
+ If either the read or the write change is add+del:
+ This is impossible; Set op==-1, events=0.
+ Else, if either the read or the write change is add:
+ Set events to 0.
+ If the read change is add, or
+ (the read change is not del, and ev_read is in old_events):
+ Add EPOLLIN to events.
+ If the write change is add, or
+ (the write change is not del, and ev_write is in old_events):
+ Add EPOLLOUT to events.
+
+ If old_events is set:
+ Set op to EPOLL_CTL_MOD [*1,*2]
+ Else:
+ Set op to EPOLL_CTL_ADD [*3]
+
+ Else, if the read or the write change is del:
+ Set op to EPOLL_CTL_DEL.
+ If the read change is del:
+ If the write change is del:
+ Set events to EPOLLIN|EPOLLOUT
+ Else if ev_write is in old_events:
+ Set events to EPOLLOUT
+ Set op to EPOLL_CTL_MOD
+ Else
+ Set events to EPOLLIN
+ Else:
+ {The write change is del.}
+ If ev_read is in old_events:
+ Set events to EPOLLIN
+ Set op to EPOLL_CTL_MOD
+ Else:
+ Set the events to EPOLLOUT
+
+ Else:
+ There is no read or write change; set op to 0 and events to 0.
+
+  The logic is a little tricky.  If we had no events set on the fd before,
+  we need to set op="ADD" and set events=the events we want to add.  If we
+ had any events set on the fd before, and we want any events to remain on
+ the fd, we need to say op="MOD" and set events=the events we want to
+ remain. But if we want to delete the last event, we say op="DEL" and
+ set events=(any non-null pointer).
+
+ [*0] Actually, the Python script has gotten a bit more complicated, to
+ support EPOLLRDHUP.
+
+ [*1] This MOD is only a guess. MOD might fail with ENOENT if the file was
+ closed and a new file was opened with the same fd. If so, we'll retry
+ with ADD.
+
+ [*2] We can't replace this with a no-op even if old_events is the same as
+ the new events: if the file was closed and reopened, we need to retry
+ with an ADD. (We do a MOD in this case since "no change" is more
+ common than "close and reopen", so we'll usually wind up doing 1
+       syscall instead of 2.)
+
+ [*3] This ADD is only a guess. There is a fun Linux kernel issue where if
+ you have two fds for the same file (via dup) and you ADD one to an
+ epfd, then close it, then re-create it with the same fd (via dup2 or an
+ unlucky dup), then try to ADD it again, you'll get an EEXIST, since the
+ struct epitem is not actually removed from the struct eventpoll until
+ the file itself is closed.
+
+ EV_CHANGE_ADD==1
+ EV_CHANGE_DEL==2
+ EV_READ ==2
+ EV_WRITE ==4
+ EV_CLOSED ==0x80
+
+ Bit 0: close change is add
+ Bit 1: close change is del
+ Bit 2: read change is add
+ Bit 3: read change is del
+ Bit 4: write change is add
+ Bit 5: write change is del
+ Bit 6: old events had EV_READ
+ Bit 7: old events had EV_WRITE
+ Bit 8: old events had EV_CLOSED
+*/
+
+#define EPOLL_OP_TABLE_INDEX(c) \
+ ( (((c)->close_change&(EV_CHANGE_ADD|EV_CHANGE_DEL))) | \
+ (((c)->read_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 2) | \
+ (((c)->write_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 4) | \
+ (((c)->old_events&(EV_READ|EV_WRITE)) << 5) | \
+ (((c)->old_events&(EV_CLOSED)) << 1) \
+ )
+
+#if EV_READ != 2 || EV_WRITE != 4 || EV_CLOSED != 0x80 || EV_CHANGE_ADD != 1 || EV_CHANGE_DEL != 2
+#error "Libevent's internals changed! Regenerate the op_table in epolltable-internal.h"
+#endif
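+
+/* Worked example (illustration only): if an fd already had EV_READ enabled
+ * (old_events contributes bit 6) and the pending change adds a write watch
+ * (write:add contributes bit 4), then EPOLL_OP_TABLE_INDEX() yields
+ * (EV_CHANGE_ADD<<4) | (EV_READ<<5) = 16 | 64 = 80, and epoll_op_table[80]
+ * below ("old= r, write:add, read: 0, close: 0") is
+ * { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD }: keep reading, start writing, via MOD. */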
+
+static const struct operation {
+ int events;
+ int op;
+} epoll_op_table[] = {
+ /* old= 0, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= 0, write: 0, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read: 0, close:del */
+ { EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:del, close: 0 */
+ { EPOLLIN, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= r, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:del, close: 0 */
+ { EPOLLIN, EPOLL_CTL_DEL },
+ /* old= r, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= r, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read: 0, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= r, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= r, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= w, write: 0, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= w, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= w, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= w, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= w, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= rw, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read: 0, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= rw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= rw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= c, write: 0, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read: 0, close:del */
+ { EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read: 0, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read: 0, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= cr, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cr, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read: 0, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cr, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= cw, write: 0, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read: 0, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read: 0, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old=crw, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read: 0, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old=crw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+};
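+/* Editor's note (illustrative, not part of upstream libevent): each entry
+ * above pairs an epoll event mask with the epoll_ctl() operation to issue
+ * for a given combination of previously registered events ("old") and the
+ * requested read/write/close changes.  Entries of { 0, 255 } mark the
+ * "xxx" combinations that can never legitimately occur. */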
+
+#endif
diff --git a/libs/libevent/src/evbuffer-internal.h b/libs/libevent/src/evbuffer-internal.h
new file mode 100644
index 0000000000..cf4bddc80e
--- /dev/null
+++ b/libs/libevent/src/evbuffer-internal.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVBUFFER_INTERNAL_H_INCLUDED_
+#define EVBUFFER_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+#include "event2/util.h"
+#include "event2/event_struct.h"
+#include "util-internal.h"
+#include "defer-internal.h"
+
+/* Experimental cb flag: "never deferred." Implementation note:
+ * these callbacks may get an inaccurate view of n_del/n_added in their
+ * arguments. */
+#define EVBUFFER_CB_NODEFER 2
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#include <sys/queue.h>
+
+/* Minimum allocation for a chain. We define this so that we're burning no
+ * more than 5% of each allocation on overhead. It would be nice to lose even
+ * less space, though. */
+#if EVENT__SIZEOF_VOID_P < 8
+#define MIN_BUFFER_SIZE 512
+#else
+#define MIN_BUFFER_SIZE 1024
+#endif
+
+/** A single evbuffer callback for an evbuffer. This function will be invoked
+ * when bytes are added to or removed from the evbuffer. */
+struct evbuffer_cb_entry {
+ /** Structures to implement a doubly-linked queue of callbacks */
+ LIST_ENTRY(evbuffer_cb_entry) next;
+ /** The callback function to invoke when this callback is called.
+ If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
+ valid; otherwise, cb_func is valid. */
+ union {
+ evbuffer_cb_func cb_func;
+ evbuffer_cb cb_obsolete;
+ } cb;
+ /** Argument to pass to cb. */
+ void *cbarg;
+ /** Currently set flags on this callback. */
+ ev_uint32_t flags;
+};
+
+struct bufferevent;
+struct evbuffer_chain;
+struct evbuffer {
+ /** The first chain in this buffer's linked list of chains. */
+ struct evbuffer_chain *first;
+ /** The last chain in this buffer's linked list of chains. */
+ struct evbuffer_chain *last;
+
+ /** Pointer to the next pointer pointing at the 'last_with_data' chain.
+ *
+ * To unpack:
+ *
+ * The last_with_data chain is the last chain that has any data in it.
+ * If all chains in the buffer are empty, it is the first chain.
+ * If the buffer has no chains, it is NULL.
+ *
+ * The last_with_datap pointer points at _whatever 'next' pointer_
+	 * points at the last_with_data chain. If the last_with_data chain
+ * is the first chain, or it is NULL, then the last_with_datap pointer
+ * is &buf->first.
+ */
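+	/* Illustrative example (editor's addition, not upstream): for chains
+	 * A(data) -> B(data) -> C(empty), last_with_data is B and
+	 * last_with_datap is &A->next; for A(data) -> B(empty), last_with_data
+	 * is the first chain A, so last_with_datap is &buf->first. */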
+ struct evbuffer_chain **last_with_datap;
+
+ /** Total amount of bytes stored in all chains.*/
+ size_t total_len;
+
+ /** Number of bytes we have added to the buffer since we last tried to
+ * invoke callbacks. */
+ size_t n_add_for_cb;
+ /** Number of bytes we have removed from the buffer since we last
+ * tried to invoke callbacks. */
+ size_t n_del_for_cb;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ /** A lock used to mediate access to this buffer. */
+ void *lock;
+#endif
+ /** True iff we should free the lock field when we free this
+ * evbuffer. */
+ unsigned own_lock : 1;
+ /** True iff we should not allow changes to the front of the buffer
+ * (drains or prepends). */
+ unsigned freeze_start : 1;
+ /** True iff we should not allow changes to the end of the buffer
+ * (appends) */
+ unsigned freeze_end : 1;
+ /** True iff this evbuffer's callbacks are not invoked immediately
+ * upon a change in the buffer, but instead are deferred to be invoked
+ * from the event_base's loop. Useful for preventing enormous stack
+ * overflows when we have mutually recursive callbacks, and for
+ * serializing callbacks in a single thread. */
+ unsigned deferred_cbs : 1;
+#ifdef _WIN32
+ /** True iff this buffer is set up for overlapped IO. */
+ unsigned is_overlapped : 1;
+#endif
+ /** Zero or more EVBUFFER_FLAG_* bits */
+ ev_uint32_t flags;
+
+ /** Used to implement deferred callbacks. */
+ struct event_base *cb_queue;
+
+ /** A reference count on this evbuffer. When the reference count
+ * reaches 0, the buffer is destroyed. Manipulated with
+ * evbuffer_incref and evbuffer_decref_and_unlock and
+ * evbuffer_free. */
+ int refcnt;
+
+ /** A struct event_callback handle to make all of this buffer's callbacks
+ * invoked from the event loop. */
+ struct event_callback deferred;
+
+ /** A doubly-linked-list of callback functions */
+ LIST_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
+
+ /** The parent bufferevent object this evbuffer belongs to.
+ * NULL if the evbuffer stands alone. */
+ struct bufferevent *parent;
+};
+
+#if EVENT__SIZEOF_OFF_T < EVENT__SIZEOF_SIZE_T
+typedef ev_ssize_t ev_misalign_t;
+#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
+#else
+typedef ev_off_t ev_misalign_t;
+#if EVENT__SIZEOF_OFF_T > EVENT__SIZEOF_SIZE_T
+#define EVBUFFER_CHAIN_MAX EV_SIZE_MAX
+#else
+#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
+#endif
+#endif
+
+/** A single item in an evbuffer. */
+struct evbuffer_chain {
+ /** points to next buffer in the chain */
+ struct evbuffer_chain *next;
+
+ /** total allocation available in the buffer field. */
+ size_t buffer_len;
+
+ /** unused space at the beginning of buffer or an offset into a
+ * file for sendfile buffers. */
+ ev_misalign_t misalign;
+
+ /** Offset into buffer + misalign at which to start writing.
+ * In other words, the total number of bytes actually stored
+ * in buffer. */
+ size_t off;
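+	/* Illustrative layout (editor's addition, not upstream): in a plain
+	 * chain the valid data occupies buffer[misalign .. misalign+off),
+	 * and buffer[misalign+off .. buffer_len) is free space available
+	 * for appending. */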
+
+ /** Set if special handling is required for this chain */
+ unsigned flags;
+#define EVBUFFER_FILESEGMENT 0x0001 /**< A chain used for a file segment */
+#define EVBUFFER_SENDFILE 0x0002 /**< a chain used with sendfile */
+#define EVBUFFER_REFERENCE 0x0004 /**< a chain with a mem reference */
+#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */
+ /** a chain that mustn't be reallocated or freed, or have its contents
+ * memmoved, until the chain is un-pinned. */
+#define EVBUFFER_MEM_PINNED_R 0x0010
+#define EVBUFFER_MEM_PINNED_W 0x0020
+#define EVBUFFER_MEM_PINNED_ANY (EVBUFFER_MEM_PINNED_R|EVBUFFER_MEM_PINNED_W)
+ /** a chain that should be freed, but can't be freed until it is
+ * un-pinned. */
+#define EVBUFFER_DANGLING 0x0040
+ /** a chain that is a referenced copy of another chain */
+#define EVBUFFER_MULTICAST 0x0080
+
+ /** number of references to this chain */
+ int refcnt;
+
+ /** Usually points to the read-write memory belonging to this
+ * buffer allocated as part of the evbuffer_chain allocation.
+ * For mmap, this can be a read-only buffer and
+ * EVBUFFER_IMMUTABLE will be set in flags. For sendfile, it
+ * may point to NULL.
+ */
+ unsigned char *buffer;
+};
+
+/** callback for a reference chain; lets us know what to do with it when
+ * we're done with it. Lives at the end of an evbuffer_chain with the
+ * EVBUFFER_REFERENCE flag set */
+struct evbuffer_chain_reference {
+ evbuffer_ref_cleanup_cb cleanupfn;
+ void *extra;
+};
+
+/** File segment for a file-segment chain. Lives at the end of an
+ * evbuffer_chain with the EVBUFFER_FILESEGMENT flag set. */
+struct evbuffer_chain_file_segment {
+ struct evbuffer_file_segment *segment;
+#ifdef _WIN32
+ /** If we're using CreateFileMapping, this is the handle to the view. */
+ HANDLE view_handle;
+#endif
+};
+
+/* Declared in event2/buffer.h; defined here. */
+struct evbuffer_file_segment {
+	void *lock; /**< lock to prevent concurrent access to refcnt */
+ int refcnt; /**< Reference count for this file segment */
+ unsigned flags; /**< combination of EVBUF_FS_* flags */
+
+ /** What kind of file segment is this? */
+ unsigned can_sendfile : 1;
+ unsigned is_mapping : 1;
+
+ /** The fd that we read the data from. */
+ int fd;
+ /** If we're using mmap, this is the raw mapped memory. */
+ void *mapping;
+#ifdef _WIN32
+ /** If we're using CreateFileMapping, this is the mapping */
+ HANDLE mapping_handle;
+#endif
+ /** If we're using mmap or IO, this is the content of the file
+ * segment. */
+ char *contents;
+ /** Position of this segment within the file. */
+ ev_off_t file_offset;
+ /** If we're using mmap, this is the offset within 'mapping' where
+ * this data segment begins. */
+ ev_off_t mmap_offset;
+ /** The length of this segment. */
+ ev_off_t length;
+ /** Cleanup callback function */
+ evbuffer_file_segment_cleanup_cb cleanup_cb;
+	/** Argument to be passed to the cleanup callback function */
+ void *cleanup_cb_arg;
+};
+
+/** Information about the multicast parent of a chain. Lives at the
+ * end of an evbuffer_chain with the EVBUFFER_MULTICAST flag set. */
+struct evbuffer_multicast_parent {
+ /** source buffer the multicast parent belongs to */
+ struct evbuffer *source;
+ /** multicast parent for this chain */
+ struct evbuffer_chain *parent;
+};
+
+#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
+/** Return a pointer to extra data allocated along with an evbuffer. */
+#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
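+/* Editor's note (illustrative, not upstream): the macro above is used to
+ * reach the auxiliary struct that some chain types allocate directly after
+ * the chain header, e.g.
+ *   struct evbuffer_chain_reference *info =
+ *       EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
+ */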
+
+/** Assert that we are holding the lock on an evbuffer */
+#define ASSERT_EVBUFFER_LOCKED(buffer) \
+ EVLOCK_ASSERT_LOCKED((buffer)->lock)
+
+#define EVBUFFER_LOCK(buffer) \
+ do { \
+ EVLOCK_LOCK((buffer)->lock, 0); \
+ } while (0)
+#define EVBUFFER_UNLOCK(buffer) \
+ do { \
+ EVLOCK_UNLOCK((buffer)->lock, 0); \
+ } while (0)
+#define EVBUFFER_LOCK2(buffer1, buffer2) \
+ do { \
+ EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
+ } while (0)
+#define EVBUFFER_UNLOCK2(buffer1, buffer2) \
+ do { \
+ EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
+ } while (0)
+
+/** Increase the reference count of buf by one. */
+void evbuffer_incref_(struct evbuffer *buf);
+/** Increase the reference count of buf by one and acquire the lock. */
+void evbuffer_incref_and_lock_(struct evbuffer *buf);
+/** Pin a single buffer chain using a given flag. A pinned chunk may not be
+ * moved or freed until it is unpinned. */
+void evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag);
+/** Unpin a single buffer chain using a given flag. */
+void evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag);
+/** As evbuffer_free, but requires that we hold a lock on the buffer, and
+ * releases the lock before freeing it and the buffer. */
+void evbuffer_decref_and_unlock_(struct evbuffer *buffer);
+
+/** As evbuffer_expand, but does not guarantee that the newly allocated memory
+ * is contiguous. Instead, it may be split across two or more chunks. */
+int evbuffer_expand_fast_(struct evbuffer *, size_t, int);
+
+/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
+ * hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
+ * Sets up the one or two iovecs in 'vecs' to point to the free memory and its
+ * extent, and *chainp to point to the first chain that we'll try to read into.
+ * Returns the number of vecs used.
+ */
+int evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
+ struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
+ int exact);
+
+/* Helper macro: copies an evbuffer_iovec in ei to a win32 WSABUF in i. */
+#define WSABUF_FROM_EVBUFFER_IOV(i,ei) do { \
+ (i)->buf = (ei)->iov_base; \
+ (i)->len = (unsigned long)(ei)->iov_len; \
+ } while (0)
+/* XXXX the cast above is safe for now, but not if we allow mmaps on win64.
+ * See note in buffer_iocp's launch_write function */
+
+/** Set the parent bufferevent object for buf to bev */
+void evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev);
+
+void evbuffer_invoke_callbacks_(struct evbuffer *buf);
+
+
+int evbuffer_get_callbacks_(struct evbuffer *buffer,
+ struct event_callback **cbs,
+ int max_cbs);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVBUFFER_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evdns.c b/libs/libevent/src/evdns.c
new file mode 100644
index 0000000000..c4112330a9
--- /dev/null
+++ b/libs/libevent/src/evdns.c
@@ -0,0 +1,4761 @@
+/* Copyright 2006-2007 Niels Provos
+ * Copyright 2007-2012 Nick Mathewson and Niels Provos
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Based on software by Adam Langley. Adam's original message:
+ *
+ * Async DNS Library
+ * Adam Langley <agl@imperialviolet.org>
+ * http://www.imperialviolet.org/eventdns.html
+ * Public Domain code
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ *
+ * Version: 0.1b
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifndef _FORTIFY_SOURCE
+#define _FORTIFY_SOURCE 3
+#endif
+
+#include <string.h>
+#include <fcntl.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <stdarg.h>
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#ifndef _WIN32_IE
+#define _WIN32_IE 0x400
+#endif
+#include <shlobj.h>
+#endif
+
+#include "event2/dns.h"
+#include "event2/dns_struct.h"
+#include "event2/dns_compat.h"
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/thread.h"
+
+#include "defer-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "strlcpy-internal.h"
+#include "ipv6-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#ifdef _WIN32
+#include <ctype.h>
+#include <winsock2.h>
+#include <windows.h>
+#include <iphlpapi.h>
+#include <io.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#define EVDNS_LOG_DEBUG EVENT_LOG_DEBUG
+#define EVDNS_LOG_WARN EVENT_LOG_WARN
+#define EVDNS_LOG_MSG EVENT_LOG_MSG
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 255
+#endif
+
+#include <stdio.h>
+
+#undef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+
+#define ASSERT_VALID_REQUEST(req) \
+ EVUTIL_ASSERT((req)->handle && (req)->handle->current_req == (req))
+
+#define u64 ev_uint64_t
+#define u32 ev_uint32_t
+#define u16 ev_uint16_t
+#define u8 ev_uint8_t
+
+/* maximum number of addresses from a single packet */
+/* that we bother recording */
+#define MAX_V4_ADDRS 32
+#define MAX_V6_ADDRS 32
+
+
+#define TYPE_A EVDNS_TYPE_A
+#define TYPE_CNAME 5
+#define TYPE_PTR EVDNS_TYPE_PTR
+#define TYPE_SOA EVDNS_TYPE_SOA
+#define TYPE_AAAA EVDNS_TYPE_AAAA
+
+#define CLASS_INET EVDNS_CLASS_INET
+
+/* Persistent handle. We keep this separate from 'struct request' since we
+ * need some object to last for as long as an evdns_request is outstanding so
+ * that it can be canceled, whereas a search request can lead to multiple
+ * 'struct request' instances being created over its lifetime. */
+struct evdns_request {
+ struct request *current_req;
+ struct evdns_base *base;
+
+ int pending_cb; /* Waiting for its callback to be invoked; not
+ * owned by event base any more. */
+
+ /* elements used by the searching code */
+ int search_index;
+ struct search_state *search_state;
+ char *search_origname; /* needs to be free()ed */
+ int search_flags;
+};
+
+struct request {
+ u8 *request; /* the dns packet data */
+ u8 request_type; /* TYPE_PTR or TYPE_A or TYPE_AAAA */
+ unsigned int request_len;
+ int reissue_count;
+ int tx_count; /* the number of times that this packet has been sent */
+ void *user_pointer; /* the pointer given to us for this request */
+ evdns_callback_type user_callback;
+ struct nameserver *ns; /* the server which we last sent it */
+
+ /* these objects are kept in a circular list */
+ /* XXX We could turn this into a CIRCLEQ. */
+ struct request *next, *prev;
+
+ struct event timeout_event;
+
+ u16 trans_id; /* the transaction id */
+ unsigned request_appended :1; /* true if the request pointer is data which follows this struct */
+ unsigned transmit_me :1; /* needs to be transmitted */
+
+ /* XXXX This is a horrible hack. */
+ char **put_cname_in_ptr; /* store the cname here if we get one. */
+
+ struct evdns_base *base;
+
+ struct evdns_request *handle;
+};
+
+struct reply {
+ unsigned int type;
+ unsigned int have_answer : 1;
+ union {
+ struct {
+ u32 addrcount;
+ u32 addresses[MAX_V4_ADDRS];
+ } a;
+ struct {
+ u32 addrcount;
+ struct in6_addr addresses[MAX_V6_ADDRS];
+ } aaaa;
+ struct {
+ char name[HOST_NAME_MAX];
+ } ptr;
+ } data;
+};
+
+struct nameserver {
+ evutil_socket_t socket; /* a connected UDP socket */
+ struct sockaddr_storage address;
+ ev_socklen_t addrlen;
+ int failed_times; /* number of times which we have given this server a chance */
+ int timedout; /* number of times in a row a request has timed out */
+ struct event event;
+ /* these objects are kept in a circular list */
+ struct nameserver *next, *prev;
+ struct event timeout_event; /* used to keep the timeout for */
+ /* when we next probe this server. */
+ /* Valid if state == 0 */
+ /* Outstanding probe request for this nameserver, if any */
+ struct evdns_request *probe_request;
+ char state; /* zero if we think that this server is down */
+ char choked; /* true if we have an EAGAIN from this server's socket */
+ char write_waiting; /* true if we are waiting for EV_WRITE events */
+ struct evdns_base *base;
+
+ /* Number of currently inflight requests: used
+ * to track when we should add/del the event. */
+ int requests_inflight;
+};
+
+
+/* Represents a local port where we're listening for DNS requests. Right now, */
+/* only UDP is supported. */
+struct evdns_server_port {
+ evutil_socket_t socket; /* socket we use to read queries and write replies. */
+ int refcnt; /* reference count. */
+ char choked; /* Are we currently blocked from writing? */
+ char closing; /* Are we trying to close this port, pending writes? */
+ evdns_request_callback_fn_type user_callback; /* Fn to handle requests */
+ void *user_data; /* Opaque pointer passed to user_callback */
+ struct event event; /* Read/write event */
+ /* circular list of replies that we want to write. */
+ struct server_request *pending_replies;
+ struct event_base *event_base;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ void *lock;
+#endif
+};
+
+/* Represents part of a reply being built. (That is, a single RR.) */
+struct server_reply_item {
+ struct server_reply_item *next; /* next item in sequence. */
+ char *name; /* name part of the RR */
+ u16 type; /* The RR type */
+ u16 class; /* The RR class (usually CLASS_INET) */
+ u32 ttl; /* The RR TTL */
+ char is_name; /* True iff data is a label */
+ u16 datalen; /* Length of data; -1 if data is a label */
+ void *data; /* The contents of the RR */
+};
+
+/* Represents a request that we've received as a DNS server, and holds */
+/* the components of the reply as we're constructing it. */
+struct server_request {
+ /* Pointers to the next and previous entries on the list of replies */
+ /* that we're waiting to write. Only set if we have tried to respond */
+ /* and gotten EAGAIN. */
+ struct server_request *next_pending;
+ struct server_request *prev_pending;
+
+ u16 trans_id; /* Transaction id. */
+ struct evdns_server_port *port; /* Which port received this request on? */
+ struct sockaddr_storage addr; /* Where to send the response */
+ ev_socklen_t addrlen; /* length of addr */
+
+ int n_answer; /* how many answer RRs have been set? */
+ int n_authority; /* how many authority RRs have been set? */
+ int n_additional; /* how many additional RRs have been set? */
+
+ struct server_reply_item *answer; /* linked list of answer RRs */
+ struct server_reply_item *authority; /* linked list of authority RRs */
+ struct server_reply_item *additional; /* linked list of additional RRs */
+
+ /* Constructed response. Only set once we're ready to send a reply. */
+ /* Once this is set, the RR fields are cleared, and no more should be set. */
+ char *response;
+ size_t response_len;
+
+ /* Caller-visible fields: flags, questions. */
+ struct evdns_server_request base;
+};
+
+struct evdns_base {
+ /* An array of n_req_heads circular lists for inflight requests.
+ * Each inflight request req is in req_heads[req->trans_id % n_req_heads].
+ */
+ struct request **req_heads;
+ /* A circular list of requests that we're waiting to send, but haven't
+ * sent yet because there are too many requests inflight */
+ struct request *req_waiting_head;
+ /* A circular list of nameservers. */
+ struct nameserver *server_head;
+ int n_req_heads;
+
+ struct event_base *event_base;
+
+ /* The number of good nameservers that we have */
+ int global_good_nameservers;
+
+ /* inflight requests are contained in the req_head list */
+ /* and are actually going out across the network */
+ int global_requests_inflight;
+ /* requests which aren't inflight are in the waiting list */
+ /* and are counted here */
+ int global_requests_waiting;
+
+ int global_max_requests_inflight;
+
+ struct timeval global_timeout; /* 5 seconds by default */
+ int global_max_reissues; /* a reissue occurs when we get some errors from the server */
+ int global_max_retransmits; /* number of times we'll retransmit a request which timed out */
+ /* number of timeouts in a row before we consider this server to be down */
+ int global_max_nameserver_timeout;
+ /* true iff we will use the 0x20 hack to prevent poisoning attacks. */
+ int global_randomize_case;
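+	/* Editor's note (not upstream): the "0x20 hack" randomizes the ASCII
+	 * case of letters in outgoing query names (bit 0x20 toggles case),
+	 * so a blind attacker must also guess the case pattern to forge a
+	 * matching reply. */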
+
+ /* The first time that a nameserver fails, how long do we wait before
+ * probing to see if it has returned? */
+ struct timeval global_nameserver_probe_initial_timeout;
+
+	/** Local address (and port) to bind to for outgoing DNS packets. */
+ struct sockaddr_storage global_outgoing_address;
+ /** ev_socklen_t for global_outgoing_address. 0 if it isn't set. */
+ ev_socklen_t global_outgoing_addrlen;
+
+ struct timeval global_getaddrinfo_allow_skew;
+
+ int getaddrinfo_ipv4_timeouts;
+ int getaddrinfo_ipv6_timeouts;
+ int getaddrinfo_ipv4_answered;
+ int getaddrinfo_ipv6_answered;
+
+ struct search_state *global_search_state;
+
+ TAILQ_HEAD(hosts_list, hosts_entry) hostsdb;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ void *lock;
+#endif
+
+ int disable_when_inactive;
+};
+
+struct hosts_entry {
+ TAILQ_ENTRY(hosts_entry) next;
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ } addr;
+ int addrlen;
+ char hostname[1];
+};
+
+static struct evdns_base *current_base = NULL;
+
+struct evdns_base *
+evdns_get_global_base(void)
+{
+ return current_base;
+}
+
+/* Given a pointer to an evdns_server_request, get the corresponding */
+/* server_request. */
+#define TO_SERVER_REQUEST(base_ptr) \
+ ((struct server_request*) \
+ (((char*)(base_ptr) - evutil_offsetof(struct server_request, base))))
+
+#define REQ_HEAD(base, id) ((base)->req_heads[id % (base)->n_req_heads])
+
+static struct nameserver *nameserver_pick(struct evdns_base *base);
+static void evdns_request_insert(struct request *req, struct request **head);
+static void evdns_request_remove(struct request *req, struct request **head);
+static void nameserver_ready_callback(evutil_socket_t fd, short events, void *arg);
+static int evdns_transmit(struct evdns_base *base);
+static int evdns_request_transmit(struct request *req);
+static void nameserver_send_probe(struct nameserver *const ns);
+static void search_request_finished(struct evdns_request *const);
+static int search_try_next(struct evdns_request *const req);
+static struct request *search_request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg);
+static void evdns_requests_pump_waiting_queue(struct evdns_base *base);
+static u16 transaction_id_pick(struct evdns_base *base);
+static struct request *request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *name, int flags, evdns_callback_type callback, void *ptr);
+static void request_submit(struct request *const req);
+
+static int server_request_free(struct server_request *req);
+static void server_request_free_answers(struct server_request *req);
+static void server_port_free(struct evdns_server_port *port);
+static void server_port_ready_callback(evutil_socket_t fd, short events, void *arg);
+static int evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename);
+static int evdns_base_set_option_impl(struct evdns_base *base,
+ const char *option, const char *val, int flags);
+static void evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests);
+static void evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg);
+
+static int strtoint(const char *const str);
+
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+#define EVDNS_LOCK(base) EVUTIL_NIL_STMT_
+#define EVDNS_UNLOCK(base) EVUTIL_NIL_STMT_
+#define ASSERT_LOCKED(base) EVUTIL_NIL_STMT_
+#else
+#define EVDNS_LOCK(base) \
+ EVLOCK_LOCK((base)->lock, 0)
+#define EVDNS_UNLOCK(base) \
+ EVLOCK_UNLOCK((base)->lock, 0)
+#define ASSERT_LOCKED(base) \
+ EVLOCK_ASSERT_LOCKED((base)->lock)
+#endif
+
+static evdns_debug_log_fn_type evdns_log_fn = NULL;
+
+void
+evdns_set_log_fn(evdns_debug_log_fn_type fn)
+{
+ evdns_log_fn = fn;
+}
+
+#ifdef __GNUC__
+#define EVDNS_LOG_CHECK __attribute__ ((format(printf, 2, 3)))
+#else
+#define EVDNS_LOG_CHECK
+#endif
+
+static void evdns_log_(int severity, const char *fmt, ...) EVDNS_LOG_CHECK;
+static void
+evdns_log_(int severity, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args,fmt);
+ if (evdns_log_fn) {
+ char buf[512];
+ int is_warn = (severity == EVDNS_LOG_WARN);
+ evutil_vsnprintf(buf, sizeof(buf), fmt, args);
+ evdns_log_fn(is_warn, buf);
+ } else {
+ event_logv_(severity, NULL, fmt, args);
+ }
+ va_end(args);
+}
+
+#define log evdns_log_
+
+/* This walks the list of inflight requests to find the */
+/* one with a matching transaction id. Returns NULL on */
+/* failure */
+static struct request *
+request_find_from_trans_id(struct evdns_base *base, u16 trans_id) {
+ struct request *req = REQ_HEAD(base, trans_id);
+ struct request *const started_at = req;
+
+ ASSERT_LOCKED(base);
+
+ if (req) {
+ do {
+ if (req->trans_id == trans_id) return req;
+ req = req->next;
+ } while (req != started_at);
+ }
+
+ return NULL;
+}
+
+/* a libevent callback function which is called when a nameserver */
+/* has gone down and we want to test whether it has come back to life yet */
+static void
+nameserver_prod_callback(evutil_socket_t fd, short events, void *arg) {
+ struct nameserver *const ns = (struct nameserver *) arg;
+ (void)fd;
+ (void)events;
+
+ EVDNS_LOCK(ns->base);
+ nameserver_send_probe(ns);
+ EVDNS_UNLOCK(ns->base);
+}
+
+/* a libevent callback which is called when a nameserver probe (to see if */
+/* it has come back to life) times out. We increment the count of failed_times */
+/* and wait longer to send the next probe packet. */
+static void
+nameserver_probe_failed(struct nameserver *const ns) {
+ struct timeval timeout;
+ int i;
+
+ ASSERT_LOCKED(ns->base);
+ (void) evtimer_del(&ns->timeout_event);
+ if (ns->state == 1) {
+ /* This can happen if the nameserver acts in a way which makes us mark */
+ /* it as bad and then starts sending good replies. */
+ return;
+ }
+
+#define MAX_PROBE_TIMEOUT 3600
+#define TIMEOUT_BACKOFF_FACTOR 3
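+/* Editor's note (illustrative, not upstream): with an initial probe timeout
+ * T, successive probes back off as T, 3T, 9T, ... and are capped at
+ * MAX_PROBE_TIMEOUT (one hour). */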
+
+ memcpy(&timeout, &ns->base->global_nameserver_probe_initial_timeout,
+ sizeof(struct timeval));
+ for (i=ns->failed_times; i > 0 && timeout.tv_sec < MAX_PROBE_TIMEOUT; --i) {
+ timeout.tv_sec *= TIMEOUT_BACKOFF_FACTOR;
+ timeout.tv_usec *= TIMEOUT_BACKOFF_FACTOR;
+ if (timeout.tv_usec > 1000000) {
+ timeout.tv_sec += timeout.tv_usec / 1000000;
+ timeout.tv_usec %= 1000000;
+ }
+ }
+ if (timeout.tv_sec > MAX_PROBE_TIMEOUT) {
+ timeout.tv_sec = MAX_PROBE_TIMEOUT;
+ timeout.tv_usec = 0;
+ }
+
+ ns->failed_times++;
+
+ if (evtimer_add(&ns->timeout_event, &timeout) < 0) {
+ char addrbuf[128];
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ }
+}
+
+static void
+request_swap_ns(struct request *req, struct nameserver *ns) {
+ if (ns && req->ns != ns) {
+ EVUTIL_ASSERT(req->ns->requests_inflight > 0);
+ req->ns->requests_inflight--;
+ ns->requests_inflight++;
+
+ req->ns = ns;
+ }
+}
+
+/* called when a nameserver has been deemed to have failed. For example, too */
+/* many packets have timed out etc */
+static void
+nameserver_failed(struct nameserver *const ns, const char *msg) {
+ struct request *req, *started_at;
+ struct evdns_base *base = ns->base;
+ int i;
+ char addrbuf[128];
+
+ ASSERT_LOCKED(base);
+ /* if this nameserver has already been marked as failed */
+ /* then don't do anything */
+ if (!ns->state) return;
+
+ log(EVDNS_LOG_MSG, "Nameserver %s has failed: %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)),
+ msg);
+
+ base->global_good_nameservers--;
+ EVUTIL_ASSERT(base->global_good_nameservers >= 0);
+ if (base->global_good_nameservers == 0) {
+ log(EVDNS_LOG_MSG, "All nameservers have failed");
+ }
+
+ ns->state = 0;
+ ns->failed_times = 1;
+
+ if (evtimer_add(&ns->timeout_event,
+ &base->global_nameserver_probe_initial_timeout) < 0) {
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* ???? Do more? */
+ }
+
+ /* walk the list of inflight requests to see if any can be reassigned to */
+ /* a different server. Requests in the waiting queue don't have a */
+ /* nameserver assigned yet */
+
+ /* if we don't have *any* good nameservers then there's no point */
+ /* trying to reassign requests to one */
+ if (!base->global_good_nameservers) return;
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ req = started_at = base->req_heads[i];
+ if (req) {
+ do {
+ if (req->tx_count == 0 && req->ns == ns) {
+ /* still waiting to go out, can be moved */
+ /* to another server */
+ request_swap_ns(req, nameserver_pick(base));
+ }
+ req = req->next;
+ } while (req != started_at);
+ }
+ }
+}
+
+static void
+nameserver_up(struct nameserver *const ns)
+{
+ char addrbuf[128];
+ ASSERT_LOCKED(ns->base);
+ if (ns->state) return;
+ log(EVDNS_LOG_MSG, "Nameserver %s is back up",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ evtimer_del(&ns->timeout_event);
+ if (ns->probe_request) {
+ evdns_cancel_request(ns->base, ns->probe_request);
+ ns->probe_request = NULL;
+ }
+ ns->state = 1;
+ ns->failed_times = 0;
+ ns->timedout = 0;
+ ns->base->global_good_nameservers++;
+}
+
+static void
+request_trans_id_set(struct request *const req, const u16 trans_id) {
+ req->trans_id = trans_id;
+ *((u16 *) req->request) = htons(trans_id);
+}
+
+/* Called to remove a request from a list and dealloc it. */
+/* head is a pointer to the head of the list it should be */
+/* removed from or NULL if the request isn't in a list. */
+/* when free_handle is one, free the handle as well. */
+static void
+request_finished(struct request *const req, struct request **head, int free_handle) {
+ struct evdns_base *base = req->base;
+ int was_inflight = (head != &base->req_waiting_head);
+ EVDNS_LOCK(base);
+ ASSERT_VALID_REQUEST(req);
+
+ if (head)
+ evdns_request_remove(req, head);
+
+ log(EVDNS_LOG_DEBUG, "Removing timeout for request %p", req);
+ if (was_inflight) {
+ evtimer_del(&req->timeout_event);
+ base->global_requests_inflight--;
+ req->ns->requests_inflight--;
+ } else {
+ base->global_requests_waiting--;
+ }
+ /* it was initialized during request_new / evtimer_assign */
+ event_debug_unassign(&req->timeout_event);
+
+ if (req->ns &&
+ req->ns->requests_inflight == 0 &&
+ req->base->disable_when_inactive) {
+ event_del(&req->ns->event);
+ evtimer_del(&req->ns->timeout_event);
+ }
+
+ if (!req->request_appended) {
+		/* need to free the request data on its own */
+ mm_free(req->request);
+ } else {
+		/* the request data is appended onto the header, */
+		/* so everything gets free()ed when we free the request below. */
+ }
+
+ if (req->handle) {
+ EVUTIL_ASSERT(req->handle->current_req == req);
+
+ if (free_handle) {
+ search_request_finished(req->handle);
+ req->handle->current_req = NULL;
+ if (! req->handle->pending_cb) {
+ /* If we're planning to run the callback,
+ * don't free the handle until later. */
+ mm_free(req->handle);
+ }
+ req->handle = NULL; /* If we have a bug, let's crash
+ * early */
+ } else {
+ req->handle->current_req = NULL;
+ }
+ }
+
+ mm_free(req);
+
+ evdns_requests_pump_waiting_queue(base);
+ EVDNS_UNLOCK(base);
+}
+
+/* This is called when a server returns a funny error code. */
+/* We try the request again with another server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 failed/reissue is pointless */
+static int
+request_reissue(struct request *req) {
+ const struct nameserver *const last_ns = req->ns;
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ /* the last nameserver should have been marked as failing */
+ /* by the caller of this function, therefore pick will try */
+ /* not to return it */
+ request_swap_ns(req, nameserver_pick(req->base));
+ if (req->ns == last_ns) {
+ /* ... but pick did return it */
+ /* not a lot of point in trying again with the */
+ /* same server */
+ return 1;
+ }
+
+ req->reissue_count++;
+ req->tx_count = 0;
+ req->transmit_me = 1;
+
+ return 0;
+}
+
+/* this function looks for space on the inflight queue and promotes */
+/* requests from the waiting queue if it can. */
+/* */
+/* TODO: */
+/* add return code, see at nameserver_pick() and other functions. */
+static void
+evdns_requests_pump_waiting_queue(struct evdns_base *base) {
+ ASSERT_LOCKED(base);
+ while (base->global_requests_inflight < base->global_max_requests_inflight &&
+ base->global_requests_waiting) {
+ struct request *req;
+
+ EVUTIL_ASSERT(base->req_waiting_head);
+ req = base->req_waiting_head;
+
+ req->ns = nameserver_pick(base);
+ if (!req->ns)
+ return;
+
+ /* move a request from the waiting queue to the inflight queue */
+ req->ns->requests_inflight++;
+
+ evdns_request_remove(req, &base->req_waiting_head);
+
+ base->global_requests_waiting--;
+ base->global_requests_inflight++;
+
+ request_trans_id_set(req, transaction_id_pick(base));
+
+ evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
+ evdns_request_transmit(req);
+ evdns_transmit(base);
+ }
+}
+
+/* TODO(nickm) document */
+struct deferred_reply_callback {
+ struct event_callback deferred;
+ struct evdns_request *handle;
+ u8 request_type;
+ u8 have_reply;
+ u32 ttl;
+ u32 err;
+ evdns_callback_type user_callback;
+ struct reply reply;
+};
+
+static void
+reply_run_callback(struct event_callback *d, void *user_pointer)
+{
+ struct deferred_reply_callback *cb =
+ EVUTIL_UPCAST(d, struct deferred_reply_callback, deferred);
+
+ switch (cb->request_type) {
+ case TYPE_A:
+ if (cb->have_reply)
+ cb->user_callback(DNS_ERR_NONE, DNS_IPv4_A,
+ cb->reply.data.a.addrcount, cb->ttl,
+ cb->reply.data.a.addresses,
+ user_pointer);
+ else
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ break;
+ case TYPE_PTR:
+ if (cb->have_reply) {
+ char *name = cb->reply.data.ptr.name;
+ cb->user_callback(DNS_ERR_NONE, DNS_PTR, 1, cb->ttl,
+ &name, user_pointer);
+ } else {
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ }
+ break;
+ case TYPE_AAAA:
+ if (cb->have_reply)
+ cb->user_callback(DNS_ERR_NONE, DNS_IPv6_AAAA,
+ cb->reply.data.aaaa.addrcount, cb->ttl,
+ cb->reply.data.aaaa.addresses,
+ user_pointer);
+ else
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ break;
+ default:
+ EVUTIL_ASSERT(0);
+ }
+
+ if (cb->handle && cb->handle->pending_cb) {
+ mm_free(cb->handle);
+ }
+
+ mm_free(cb);
+}
+
+static void
+reply_schedule_callback(struct request *const req, u32 ttl, u32 err, struct reply *reply)
+{
+ struct deferred_reply_callback *d = mm_calloc(1, sizeof(*d));
+
+ if (!d) {
+ event_warn("%s: Couldn't allocate space for deferred callback.",
+ __func__);
+ return;
+ }
+
+ ASSERT_LOCKED(req->base);
+
+ d->request_type = req->request_type;
+ d->user_callback = req->user_callback;
+ d->ttl = ttl;
+ d->err = err;
+ if (reply) {
+ d->have_reply = 1;
+ memcpy(&d->reply, reply, sizeof(struct reply));
+ }
+
+ if (req->handle) {
+ req->handle->pending_cb = 1;
+ d->handle = req->handle;
+ }
+
+ event_deferred_cb_init_(
+ &d->deferred,
+ event_get_priority(&req->timeout_event),
+ reply_run_callback,
+ req->user_pointer);
+ event_deferred_cb_schedule_(
+ req->base->event_base,
+ &d->deferred);
+}
+
+/* this processes a parsed reply packet */
+static void
+reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) {
+ int error;
+ char addrbuf[128];
+ static const int error_codes[] = {
+ DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST,
+ DNS_ERR_NOTIMPL, DNS_ERR_REFUSED
+ };
+
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
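+	/* Editor's note (not upstream): in the DNS header flags word,
+	 * 0x0200 is the TC (truncated) bit and 0x000f is the RCODE field,
+	 * so (flags & 0x020f) is nonzero whenever the reply was truncated
+	 * or carried an error code. */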
+ if (flags & 0x020f || !reply || !reply->have_answer) {
+ /* there was an error */
+ if (flags & 0x0200) {
+ error = DNS_ERR_TRUNCATED;
+ } else if (flags & 0x000f) {
+ u16 error_code = (flags & 0x000f) - 1;
+ if (error_code > 4) {
+ error = DNS_ERR_UNKNOWN;
+ } else {
+ error = error_codes[error_code];
+ }
+ } else if (reply && !reply->have_answer) {
+ error = DNS_ERR_NODATA;
+ } else {
+ error = DNS_ERR_UNKNOWN;
+ }
+
+ switch (error) {
+ case DNS_ERR_NOTIMPL:
+ case DNS_ERR_REFUSED:
+ /* we regard these errors as marking a bad nameserver */
+ if (req->reissue_count < req->base->global_max_reissues) {
+ char msg[64];
+ evutil_snprintf(msg, sizeof(msg), "Bad response %d (%s)",
+ error, evdns_err_to_string(error));
+ nameserver_failed(req->ns, msg);
+ if (!request_reissue(req)) return;
+ }
+ break;
+ case DNS_ERR_SERVERFAILED:
+ /* rcode 2 (servfailed) sometimes means "we
+ * are broken" and sometimes (with some binds)
+ * means "that request was very confusing."
+ * Treat this as a timeout, not a failure.
+ */
+		log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver "
+		    "at %s; will allow the request to time out.",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&req->ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* Call the timeout function */
+ evdns_request_timeout_callback(0, 0, req);
+ return;
+ default:
+ /* we got a good reply from the nameserver: it is up. */
+ if (req->handle == req->ns->probe_request) {
+ /* Avoid double-free */
+ req->ns->probe_request = NULL;
+ }
+
+ nameserver_up(req->ns);
+ }
+
+ if (req->handle->search_state &&
+ req->request_type != TYPE_PTR) {
+ /* if we have a list of domains to search in,
+ * try the next one */
+ if (!search_try_next(req->handle)) {
+				/* a new request was issued so this
+				 * request is finished and the user
+				 * callback will be made when that
+				 * request (or a child of it) finishes. */
+ return;
+ }
+ }
+
+ /* all else failed. Pass the failure up */
+ reply_schedule_callback(req, ttl, error, NULL);
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ } else {
+ /* all ok, tell the user */
+ reply_schedule_callback(req, ttl, 0, reply);
+ if (req->handle == req->ns->probe_request)
+ req->ns->probe_request = NULL; /* Avoid double-free */
+ nameserver_up(req->ns);
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ }
+}
+
+static int
+name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
+ int name_end = -1;
+ int j = *idx;
+ int ptr_count = 0;
+#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0)
+#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0)
+#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0)
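+/* Editor's note (not upstream): the GET* macros above perform bounds-checked
+ * network-byte-order reads from 'packet' at offset j, jumping to the
+ * enclosing function's 'err' label if the packet is too short. */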
+
+ char *cp = name_out;
+ const char *const end = name_out + name_out_len;
+
+ /* Normally, names are a series of length prefixed strings terminated */
+ /* with a length of 0 (the lengths are u8's < 63). */
+ /* However, the length can start with a pair of 1 bits and that */
+ /* means that the next 14 bits are a pointer within the current */
+ /* packet. */
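+	/* Illustrative (editor's addition, not upstream): "www.example.com" is
+	 * encoded as  3 'w' 'w' 'w'  7 'e' 'x' 'a' 'm' 'p' 'l' 'e'  3 'c' 'o' 'm'  0,
+	 * and a compression pointer to offset 12 is the two bytes 0xc0 0x0c. */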
+
+ for (;;) {
+ u8 label_len;
+ GET8(label_len);
+ if (!label_len) break;
+ if (label_len & 0xc0) {
+ u8 ptr_low;
+ GET8(ptr_low);
+ if (name_end < 0) name_end = j;
+ j = (((int)label_len & 0x3f) << 8) + ptr_low;
+ /* Make sure that the target offset is in-bounds. */
+ if (j < 0 || j >= length) return -1;
+ /* If we've jumped more times than there are characters in the
+ * message, we must have a loop. */
+ if (++ptr_count > length) return -1;
+ continue;
+ }
+ if (label_len > 63) return -1;
+ if (cp != name_out) {
+ if (cp + 1 >= end) return -1;
+ *cp++ = '.';
+ }
+ if (cp + label_len >= end) return -1;
+ if (j + label_len > length) return -1;
+ memcpy(cp, packet + j, label_len);
+ cp += label_len;
+ j += label_len;
+ }
+ if (cp >= end) return -1;
+ *cp = '\0';
+ if (name_end < 0)
+ *idx = j;
+ else
+ *idx = name_end;
+ return 0;
+ err:
+ return -1;
+}
+
+/* parses a raw reply from a nameserver */
+static int
+reply_parse(struct evdns_base *base, u8 *packet, int length) {
+ int j = 0, k = 0; /* index into packet */
+ u16 t_; /* used by the macros */
+ u32 t32_; /* used by the macros */
+ char tmp_name[256], cmp_name[256]; /* used by the macros */
+ int name_matches = 0;
+
+ u16 trans_id, questions, answers, authority, additional, datalength;
+ u16 flags = 0;
+ u32 ttl, ttl_r = 0xffffffff;
+ struct reply reply;
+ struct request *req = NULL;
+ unsigned int i;
+
+ ASSERT_LOCKED(base);
+
+ GET16(trans_id);
+ GET16(flags);
+ GET16(questions);
+ GET16(answers);
+ GET16(authority);
+ GET16(additional);
+ (void) authority; /* suppress "unused variable" warnings. */
+ (void) additional; /* suppress "unused variable" warnings. */
+
+ req = request_find_from_trans_id(base, trans_id);
+ if (!req) return -1;
+ EVUTIL_ASSERT(req->base == base);
+
+ memset(&reply, 0, sizeof(reply));
+
+ /* If it's not an answer, it doesn't correspond to any request. */
+ if (!(flags & 0x8000)) return -1; /* must be an answer */
+ if ((flags & 0x020f) && (flags & 0x020f) != DNS_ERR_NOTEXIST) {
+ /* there was an error and it's not NXDOMAIN */
+ goto err;
+ }
+ /* if (!answers) return; */ /* must have an answer of some form */
+
+ /* This macro skips a name in the DNS reply. */
+#define SKIP_NAME \
+ do { tmp_name[0] = '\0'; \
+ if (name_parse(packet, length, &j, tmp_name, \
+ sizeof(tmp_name))<0) \
+ goto err; \
+ } while (0)
+
+ reply.type = req->request_type;
+
+ /* skip over each question in the reply */
+ for (i = 0; i < questions; ++i) {
+ /* the question looks like
+ * <label:name><u16:type><u16:class>
+ */
+ tmp_name[0] = '\0';
+ cmp_name[0] = '\0';
+ k = j;
+ if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name)) < 0)
+ goto err;
+ if (name_parse(req->request, req->request_len, &k,
+ cmp_name, sizeof(cmp_name))<0)
+ goto err;
+ if (!base->global_randomize_case) {
+ if (strcmp(tmp_name, cmp_name) == 0)
+ name_matches = 1;
+ } else {
+ if (evutil_ascii_strcasecmp(tmp_name, cmp_name) == 0)
+ name_matches = 1;
+ }
+
+ j += 4;
+ if (j > length)
+ goto err;
+ }
+
+ if (!name_matches)
+ goto err;
+
+ /* now we have the answer section which looks like
+ * <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
+ */
+
+ for (i = 0; i < answers; ++i) {
+ u16 type, class;
+
+ SKIP_NAME;
+ GET16(type);
+ GET16(class);
+ GET32(ttl);
+ GET16(datalength);
+
+ if (type == TYPE_A && class == CLASS_INET) {
+ int addrcount, addrtocopy;
+ if (req->request_type != TYPE_A) {
+ j += datalength; continue;
+ }
+ if ((datalength & 3) != 0) /* not an even number of As. */
+ goto err;
+ addrcount = datalength >> 2;
+ addrtocopy = MIN(MAX_V4_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
+
+ ttl_r = MIN(ttl_r, ttl);
+			/* we only bother with the first MAX_V4_ADDRS addresses. */
+ if (j + 4*addrtocopy > length) goto err;
+ memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
+ packet + j, 4*addrtocopy);
+ j += 4*addrtocopy;
+ reply.data.a.addrcount += addrtocopy;
+ reply.have_answer = 1;
+ if (reply.data.a.addrcount == MAX_V4_ADDRS) break;
+ } else if (type == TYPE_PTR && class == CLASS_INET) {
+ if (req->request_type != TYPE_PTR) {
+ j += datalength; continue;
+ }
+ if (name_parse(packet, length, &j, reply.data.ptr.name,
+ sizeof(reply.data.ptr.name))<0)
+ goto err;
+ ttl_r = MIN(ttl_r, ttl);
+ reply.have_answer = 1;
+ break;
+ } else if (type == TYPE_CNAME) {
+ char cname[HOST_NAME_MAX];
+ if (!req->put_cname_in_ptr || *req->put_cname_in_ptr) {
+ j += datalength; continue;
+ }
+ if (name_parse(packet, length, &j, cname,
+ sizeof(cname))<0)
+ goto err;
+ *req->put_cname_in_ptr = mm_strdup(cname);
+ } else if (type == TYPE_AAAA && class == CLASS_INET) {
+ int addrcount, addrtocopy;
+ if (req->request_type != TYPE_AAAA) {
+ j += datalength; continue;
+ }
+ if ((datalength & 15) != 0) /* not an even number of AAAAs. */
+ goto err;
+ addrcount = datalength >> 4; /* each address is 16 bytes long */
+ addrtocopy = MIN(MAX_V6_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
+ ttl_r = MIN(ttl_r, ttl);
+
+			/* we only bother with the first MAX_V6_ADDRS addresses. */
+ if (j + 16*addrtocopy > length) goto err;
+ memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
+ packet + j, 16*addrtocopy);
+ reply.data.aaaa.addrcount += addrtocopy;
+ j += 16*addrtocopy;
+ reply.have_answer = 1;
+ if (reply.data.aaaa.addrcount == MAX_V6_ADDRS) break;
+ } else {
+ /* skip over any other type of resource */
+ j += datalength;
+ }
+ }
+
+ if (!reply.have_answer) {
+ for (i = 0; i < authority; ++i) {
+ u16 type, class;
+ SKIP_NAME;
+ GET16(type);
+ GET16(class);
+ GET32(ttl);
+ GET16(datalength);
+ if (type == TYPE_SOA && class == CLASS_INET) {
+ u32 serial, refresh, retry, expire, minimum;
+ SKIP_NAME;
+ SKIP_NAME;
+ GET32(serial);
+ GET32(refresh);
+ GET32(retry);
+ GET32(expire);
+ GET32(minimum);
+ (void)expire;
+ (void)retry;
+ (void)refresh;
+ (void)serial;
+ ttl_r = MIN(ttl_r, ttl);
+ ttl_r = MIN(ttl_r, minimum);
+ } else {
+ /* skip over any other type of resource */
+ j += datalength;
+ }
+ }
+ }
+
+ if (ttl_r == 0xffffffff)
+ ttl_r = 0;
+
+ reply_handle(req, flags, ttl_r, &reply);
+ return 0;
+ err:
+ if (req)
+ reply_handle(req, flags, 0, NULL);
+ return -1;
+}
+
+/* Parse a raw request (packet,length) sent to a nameserver port (port) from */
+/* a DNS client (addr,addrlen), and if it's well-formed, call the corresponding */
+/* callback. */
+static int
+request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, ev_socklen_t addrlen)
+{
+ int j = 0; /* index into packet */
+ u16 t_; /* used by the macros */
+ char tmp_name[256]; /* used by the macros */
+
+ int i;
+ u16 trans_id, flags, questions, answers, authority, additional;
+ struct server_request *server_req = NULL;
+
+ ASSERT_LOCKED(port);
+
+ /* Get the header fields */
+ GET16(trans_id);
+ GET16(flags);
+ GET16(questions);
+ GET16(answers);
+ GET16(authority);
+ GET16(additional);
+ (void)answers;
+ (void)additional;
+ (void)authority;
+
+ if (flags & 0x8000) return -1; /* Must not be an answer. */
+ flags &= 0x0110; /* Only RD and CD get preserved. */
+
+ server_req = mm_malloc(sizeof(struct server_request));
+ if (server_req == NULL) return -1;
+ memset(server_req, 0, sizeof(struct server_request));
+
+ server_req->trans_id = trans_id;
+ memcpy(&server_req->addr, addr, addrlen);
+ server_req->addrlen = addrlen;
+
+ server_req->base.flags = flags;
+ server_req->base.nquestions = 0;
+ server_req->base.questions = mm_calloc(sizeof(struct evdns_server_question *), questions);
+ if (server_req->base.questions == NULL)
+ goto err;
+
+ for (i = 0; i < questions; ++i) {
+ u16 type, class;
+ struct evdns_server_question *q;
+ int namelen;
+ if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)
+ goto err;
+ GET16(type);
+ GET16(class);
+ namelen = (int)strlen(tmp_name);
+ q = mm_malloc(sizeof(struct evdns_server_question) + namelen);
+ if (!q)
+ goto err;
+ q->type = type;
+ q->dns_question_class = class;
+ memcpy(q->name, tmp_name, namelen+1);
+ server_req->base.questions[server_req->base.nquestions++] = q;
+ }
+
+ /* Ignore answers, authority, and additional. */
+
+ server_req->port = port;
+ port->refcnt++;
+
+ /* Only standard queries are supported. */
+ if (flags & 0x7800) {
+ evdns_server_request_respond(&(server_req->base), DNS_ERR_NOTIMPL);
+ return -1;
+ }
+
+ port->user_callback(&(server_req->base), port->user_data);
+
+ return 0;
+err:
+ if (server_req) {
+ if (server_req->base.questions) {
+ for (i = 0; i < server_req->base.nquestions; ++i)
+ mm_free(server_req->base.questions[i]);
+ mm_free(server_req->base.questions);
+ }
+ mm_free(server_req);
+ }
+ return -1;
+
+#undef SKIP_NAME
+#undef GET32
+#undef GET16
+#undef GET8
+}
+
+
+void
+evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void))
+{
+}
+
+void
+evdns_set_random_bytes_fn(void (*fn)(char *, size_t))
+{
+}
+
+/* Try to choose a strong transaction id which isn't already in flight */
+static u16
+transaction_id_pick(struct evdns_base *base) {
+ ASSERT_LOCKED(base);
+ for (;;) {
+ u16 trans_id;
+ evutil_secure_rng_get_bytes(&trans_id, sizeof(trans_id));
+
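+	/* 0xffff is reserved as the "no transaction id assigned yet" sentinel (see request_new), so never hand it out. */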
+ if (trans_id == 0xffff) continue;
+ /* now check to see if that id is already inflight */
+ if (request_find_from_trans_id(base, trans_id) == NULL)
+ return trans_id;
+ }
+}
+
+/* choose a nameserver to use. This function will try to ignore */
+/* nameservers which we think are down and load balance across the rest */
+/* by updating the server_head global each time. */
+static struct nameserver *
+nameserver_pick(struct evdns_base *base) {
+ struct nameserver *started_at = base->server_head, *picked;
+ ASSERT_LOCKED(base);
+ if (!base->server_head) return NULL;
+
+ /* if we don't have any good nameservers then there's no */
+ /* point in trying to find one. */
+ if (!base->global_good_nameservers) {
+ base->server_head = base->server_head->next;
+ return base->server_head;
+ }
+
+ /* remember that nameservers are in a circular list */
+ for (;;) {
+ if (base->server_head->state) {
+ /* we think this server is currently good */
+ picked = base->server_head;
+ base->server_head = base->server_head->next;
+ return picked;
+ }
+
+ base->server_head = base->server_head->next;
+ if (base->server_head == started_at) {
+ /* all the nameservers seem to be down */
+ /* so we just return this one and hope for the */
+ /* best */
+ EVUTIL_ASSERT(base->global_good_nameservers == 0);
+ picked = base->server_head;
+ base->server_head = base->server_head->next;
+ return picked;
+ }
+ }
+}
+
+/* this is called when a nameserver socket is ready for reading */
+static void
+nameserver_read(struct nameserver *ns) {
+ struct sockaddr_storage ss;
+ ev_socklen_t addrlen = sizeof(ss);
+ u8 packet[1500];
+ char addrbuf[128];
+ ASSERT_LOCKED(ns->base);
+
+ for (;;) {
+ const int r = recvfrom(ns->socket, (void*)packet,
+ sizeof(packet), 0,
+ (struct sockaddr*)&ss, &addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(ns->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ nameserver_failed(ns,
+ evutil_socket_error_to_string(err));
+ return;
+ }
+ if (evutil_sockaddr_cmp((struct sockaddr*)&ss,
+ (struct sockaddr*)&ns->address, 0)) {
+ log(EVDNS_LOG_WARN, "Address mismatch on received "
+ "DNS packet. Apparent source was %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ss,
+ addrbuf, sizeof(addrbuf)));
+ return;
+ }
+
+ ns->timedout = 0;
+ reply_parse(ns->base, packet, r);
+ }
+}
+
+/* Read a packet from a DNS client on a server port s, parse it, and */
+/* act accordingly. */
+static void
+server_port_read(struct evdns_server_port *s) {
+ u8 packet[1500];
+ struct sockaddr_storage addr;
+ ev_socklen_t addrlen;
+ int r;
+ ASSERT_LOCKED(s);
+
+ for (;;) {
+ addrlen = sizeof(struct sockaddr_storage);
+ r = recvfrom(s->socket, (void*)packet, sizeof(packet), 0,
+ (struct sockaddr*) &addr, &addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(s->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ log(EVDNS_LOG_WARN,
+ "Error %s (%d) while reading request.",
+ evutil_socket_error_to_string(err), err);
+ return;
+ }
+ request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen);
+ }
+}
+
+/* Try to write all pending replies on a given DNS server port. */
+static void
+server_port_flush(struct evdns_server_port *port)
+{
+ struct server_request *req = port->pending_replies;
+ ASSERT_LOCKED(port);
+ while (req) {
+ int r = sendto(port->socket, req->response, (int)req->response_len, 0,
+ (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(port->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ log(EVDNS_LOG_WARN, "Error %s (%d) while writing response to port; dropping", evutil_socket_error_to_string(err), err);
+ }
+ if (server_request_free(req)) {
+ /* we released the last reference to req->port. */
+ return;
+ } else {
+ EVUTIL_ASSERT(req != port->pending_replies);
+ req = port->pending_replies;
+ }
+ }
+
+ /* We have no more pending requests; stop listening for 'writeable' events. */
+ (void) event_del(&port->event);
+ event_assign(&port->event, port->event_base,
+ port->socket, EV_READ | EV_PERSIST,
+ server_port_ready_callback, port);
+
+ if (event_add(&port->event, NULL) < 0) {
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server.");
+ /* ???? Do more? */
+ }
+}
+
+/* set if we are waiting for the ability to write to this server. */
+/* if waiting is true then we ask libevent for EV_WRITE events, otherwise */
+/* we stop these events. */
+static void
+nameserver_write_waiting(struct nameserver *ns, char waiting) {
+ ASSERT_LOCKED(ns->base);
+ if (ns->write_waiting == waiting) return;
+
+ ns->write_waiting = waiting;
+ (void) event_del(&ns->event);
+ event_assign(&ns->event, ns->base->event_base,
+ ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST,
+ nameserver_ready_callback, ns);
+ if (event_add(&ns->event, NULL) < 0) {
+ char addrbuf[128];
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* ???? Do more? */
+ }
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a nameserver socket is ready for writing or reading */
+static void
+nameserver_ready_callback(evutil_socket_t fd, short events, void *arg) {
+ struct nameserver *ns = (struct nameserver *) arg;
+ (void)fd;
+
+ EVDNS_LOCK(ns->base);
+ if (events & EV_WRITE) {
+ ns->choked = 0;
+ if (!evdns_transmit(ns->base)) {
+ nameserver_write_waiting(ns, 0);
+ }
+ }
+ if (events & EV_READ) {
+ nameserver_read(ns);
+ }
+ EVDNS_UNLOCK(ns->base);
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a server socket is ready for writing or reading. */
+static void
+server_port_ready_callback(evutil_socket_t fd, short events, void *arg) {
+ struct evdns_server_port *port = (struct evdns_server_port *) arg;
+ (void) fd;
+
+ EVDNS_LOCK(port);
+ if (events & EV_WRITE) {
+ port->choked = 0;
+ server_port_flush(port);
+ }
+ if (events & EV_READ) {
+ server_port_read(port);
+ }
+ EVDNS_UNLOCK(port);
+}
+
+/* This is an inefficient representation; only use it via the dnslabel_table_*
+ * functions, so that it can be safely replaced with something smarter later. */
+#define MAX_LABELS 128
+/* Structures used to implement name compression */
+struct dnslabel_entry { char *v; off_t pos; };
+struct dnslabel_table {
+ int n_labels; /* number of current entries */
+ /* map from name to position in message */
+ struct dnslabel_entry labels[MAX_LABELS];
+};
+
+/* Initialize dnslabel_table. */
+static void
+dnslabel_table_init(struct dnslabel_table *table)
+{
+ table->n_labels = 0;
+}
+
+/* Free all storage held by table, but not the table itself. */
+static void
+dnslabel_clear(struct dnslabel_table *table)
+{
+ int i;
+ for (i = 0; i < table->n_labels; ++i)
+ mm_free(table->labels[i].v);
+ table->n_labels = 0;
+}
+
+/* return the position of the label in the current message, or -1 if the label */
+/* hasn't been used yet. */
+static int
+dnslabel_table_get_pos(const struct dnslabel_table *table, const char *label)
+{
+ int i;
+ for (i = 0; i < table->n_labels; ++i) {
+ if (!strcmp(label, table->labels[i].v))
+ return table->labels[i].pos;
+ }
+ return -1;
+}
+
+/* remember that we've used the label at position pos */
+static int
+dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos)
+{
+ char *v;
+ int p;
+ if (table->n_labels == MAX_LABELS)
+ return (-1);
+ v = mm_strdup(label);
+ if (v == NULL)
+ return (-1);
+ p = table->n_labels++;
+ table->labels[p].v = v;
+ table->labels[p].pos = pos;
+
+ return (0);
+}
+
+/* Converts a string to a length-prefixed set of DNS labels, starting */
+/* at buf[j]. name and buf must not overlap. name_len should be the length */
+/* of name. table is optional, and is used for compression. */
+/* */
+/* Input: abc.def */
+/* Output: <3>abc<3>def<0> */
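+/* (i.e. the bytes 03 'a' 'b' 'c' 03 'd' 'e' 'f' 00 on the wire) */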
+/* */
+/* Returns the first index after the encoded name, or negative on error. */
+/* -1 label was > 63 bytes */
+/* -2 name too long to fit in buffer. */
+/* */
+static off_t
+dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j,
+ const char *name, const size_t name_len,
+ struct dnslabel_table *table) {
+ const char *end = name + name_len;
+ int ref = 0;
+ u16 t_;
+
+#define APPEND16(x) do { \
+ if (j + 2 > (off_t)buf_len) \
+ goto overflow; \
+ t_ = htons(x); \
+ memcpy(buf + j, &t_, 2); \
+ j += 2; \
+ } while (0)
+#define APPEND32(x) do { \
+ if (j + 4 > (off_t)buf_len) \
+ goto overflow; \
+ t32_ = htonl(x); \
+ memcpy(buf + j, &t32_, 4); \
+ j += 4; \
+ } while (0)
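+/* APPEND32 relies on a local t32_ declared by the functions that use it */
+/* (e.g. evdns_server_request_format_response); dnsname_to_labels itself only needs APPEND16. */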
+
+ if (name_len > 255) return -2;
+
+ for (;;) {
+ const char *const start = name;
+ if (table && (ref = dnslabel_table_get_pos(table, name)) >= 0) {
+ APPEND16(ref | 0xc000);
+ return j;
+ }
+ name = strchr(name, '.');
+ if (!name) {
+ const size_t label_len = end - start;
+ if (label_len > 63) return -1;
+ if ((size_t)(j+label_len+1) > buf_len) return -2;
+ if (table) dnslabel_table_add(table, start, j);
+ buf[j++] = (ev_uint8_t)label_len;
+
+ memcpy(buf + j, start, label_len);
+ j += (int) label_len;
+ break;
+ } else {
+ /* append length of the label. */
+ const size_t label_len = name - start;
+ if (label_len > 63) return -1;
+ if ((size_t)(j+label_len+1) > buf_len) return -2;
+ if (table) dnslabel_table_add(table, start, j);
+ buf[j++] = (ev_uint8_t)label_len;
+
+ memcpy(buf + j, start, label_len);
+ j += (int) label_len;
+ /* hop over the '.' */
+ name++;
+ }
+ }
+
+ /* the labels must be terminated by a 0. */
+ /* It's possible that the name ended in a . */
+ /* in which case the zero is already there */
+ if (!j || buf[j-1]) buf[j++] = 0;
+ return j;
+ overflow:
+ return (-2);
+}
+
+/* Finds the length of a dns request for a DNS name of the given */
+/* length. The actual request may be smaller than the value returned */
+/* here */
+static size_t
+evdns_request_len(const size_t name_len) {
+	return 96 + /* plenty of room for the 12-byte DNS header */
+		name_len + 2 + /* worst-case encoded name: leading length byte plus trailing zero */
+		4; /* space for the resource type and class */
+}
+
+/* build a dns request packet into buf. buf should be at least as long */
+/* as evdns_request_len told you it should be. */
+/* */
+/* Returns the amount of space used. Negative on error. */
+static int
+evdns_request_data_build(const char *const name, const size_t name_len,
+ const u16 trans_id, const u16 type, const u16 class,
+ u8 *const buf, size_t buf_len) {
+ off_t j = 0; /* current offset into buf */
+ u16 t_; /* used by the macros */
+
+ APPEND16(trans_id);
+	APPEND16(0x0100); /* standard query, recursion desired */
+ APPEND16(1); /* one question */
+ APPEND16(0); /* no answers */
+ APPEND16(0); /* no authority */
+ APPEND16(0); /* no additional */
+
+ j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL);
+ if (j < 0) {
+ return (int)j;
+ }
+
+ APPEND16(type);
+ APPEND16(class);
+
+ return (int)j;
+ overflow:
+ return (-1);
+}
+
+/* exported function */
+struct evdns_server_port *
+evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data)
+{
+ struct evdns_server_port *port;
+ if (flags)
+ return NULL; /* flags not yet implemented */
+ if (!(port = mm_malloc(sizeof(struct evdns_server_port))))
+ return NULL;
+ memset(port, 0, sizeof(struct evdns_server_port));
+
+
+ port->socket = socket;
+ port->refcnt = 1;
+ port->choked = 0;
+ port->closing = 0;
+ port->user_callback = cb;
+ port->user_data = user_data;
+ port->pending_replies = NULL;
+ port->event_base = base;
+
+ event_assign(&port->event, port->event_base,
+ port->socket, EV_READ | EV_PERSIST,
+ server_port_ready_callback, port);
+ if (event_add(&port->event, NULL) < 0) {
+ mm_free(port);
+ return NULL;
+ }
+ EVTHREAD_ALLOC_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ return port;
+}
+
+struct evdns_server_port *
+evdns_add_server_port(evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data)
+{
+ return evdns_add_server_port_with_base(NULL, socket, flags, cb, user_data);
+}
+
+/* exported function */
+void
+evdns_close_server_port(struct evdns_server_port *port)
+{
+ EVDNS_LOCK(port);
+ if (--port->refcnt == 0) {
+ EVDNS_UNLOCK(port);
+ server_port_free(port);
+ } else {
+ port->closing = 1;
+ }
+}
+
+/* exported function */
+int
+evdns_server_request_add_reply(struct evdns_server_request *req_, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ struct server_reply_item **itemp, *item;
+ int *countp;
+ int result = -1;
+
+ EVDNS_LOCK(req->port);
+ if (req->response) /* have we already answered? */
+ goto done;
+
+ switch (section) {
+ case EVDNS_ANSWER_SECTION:
+ itemp = &req->answer;
+ countp = &req->n_answer;
+ break;
+ case EVDNS_AUTHORITY_SECTION:
+ itemp = &req->authority;
+ countp = &req->n_authority;
+ break;
+ case EVDNS_ADDITIONAL_SECTION:
+ itemp = &req->additional;
+ countp = &req->n_additional;
+ break;
+ default:
+ goto done;
+ }
+ while (*itemp) {
+ itemp = &((*itemp)->next);
+ }
+ item = mm_malloc(sizeof(struct server_reply_item));
+ if (!item)
+ goto done;
+ item->next = NULL;
+ if (!(item->name = mm_strdup(name))) {
+ mm_free(item);
+ goto done;
+ }
+ item->type = type;
+ item->dns_question_class = class;
+ item->ttl = ttl;
+ item->is_name = is_name != 0;
+ item->datalen = 0;
+ item->data = NULL;
+ if (data) {
+ if (item->is_name) {
+ if (!(item->data = mm_strdup(data))) {
+ mm_free(item->name);
+ mm_free(item);
+ goto done;
+ }
+ item->datalen = (u16)-1;
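+	/* (u16)-1 is a sentinel; the real length is filled in when the name is label-encoded at response-format time. */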
+ } else {
+ if (!(item->data = mm_malloc(datalen))) {
+ mm_free(item->name);
+ mm_free(item);
+ goto done;
+ }
+ item->datalen = datalen;
+ memcpy(item->data, data, datalen);
+ }
+ }
+
+ *itemp = item;
+ ++(*countp);
+ result = 0;
+done:
+ EVDNS_UNLOCK(req->port);
+ return result;
+}
+
+/* exported function */
+int
+evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_A, CLASS_INET,
+ ttl, n*4, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_AAAA, CLASS_INET,
+ ttl, n*16, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl)
+{
+ u32 a;
+ char buf[32];
+ if (in && inaddr_name)
+ return -1;
+ else if (!in && !inaddr_name)
+ return -1;
+ if (in) {
+ a = ntohl(in->s_addr);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ inaddr_name = buf;
+ }
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, inaddr_name, TYPE_PTR, CLASS_INET,
+ ttl, -1, 1, hostname);
+}
+
+/* exported function */
+int
+evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_CNAME, CLASS_INET,
+ ttl, -1, 1, cname);
+}
+
+/* exported function */
+void
+evdns_server_request_set_flags(struct evdns_server_request *exreq, int flags)
+{
+ struct server_request *req = TO_SERVER_REQUEST(exreq);
+ req->base.flags &= ~(EVDNS_FLAGS_AA|EVDNS_FLAGS_RD);
+ req->base.flags |= flags;
+}
+
+static int
+evdns_server_request_format_response(struct server_request *req, int err)
+{
+ unsigned char buf[1500];
+ size_t buf_len = sizeof(buf);
+ off_t j = 0, r;
+ u16 t_;
+ u32 t32_;
+ int i;
+ u16 flags;
+ struct dnslabel_table table;
+
+ if (err < 0 || err > 15) return -1;
+
+ /* Set response bit and error code; copy OPCODE and RD fields from
+ * question; copy RA and AA if set by caller. */
+ flags = req->base.flags;
+ flags |= (0x8000 | err);
+
+ dnslabel_table_init(&table);
+ APPEND16(req->trans_id);
+ APPEND16(flags);
+ APPEND16(req->base.nquestions);
+ APPEND16(req->n_answer);
+ APPEND16(req->n_authority);
+ APPEND16(req->n_additional);
+
+ /* Add questions. */
+ for (i=0; i < req->base.nquestions; ++i) {
+ const char *s = req->base.questions[i]->name;
+ j = dnsname_to_labels(buf, buf_len, j, s, strlen(s), &table);
+ if (j < 0) {
+ dnslabel_clear(&table);
+ return (int) j;
+ }
+ APPEND16(req->base.questions[i]->type);
+ APPEND16(req->base.questions[i]->dns_question_class);
+ }
+
+ /* Add answer, authority, and additional sections. */
+ for (i=0; i<3; ++i) {
+ struct server_reply_item *item;
+ if (i==0)
+ item = req->answer;
+ else if (i==1)
+ item = req->authority;
+ else
+ item = req->additional;
+ while (item) {
+ r = dnsname_to_labels(buf, buf_len, j, item->name, strlen(item->name), &table);
+ if (r < 0)
+ goto overflow;
+ j = r;
+
+ APPEND16(item->type);
+ APPEND16(item->dns_question_class);
+ APPEND32(item->ttl);
+ if (item->is_name) {
+ off_t len_idx = j, name_start;
+ j += 2;
+ name_start = j;
+ r = dnsname_to_labels(buf, buf_len, j, item->data, strlen(item->data), &table);
+ if (r < 0)
+ goto overflow;
+ j = r;
+ t_ = htons( (short) (j-name_start) );
+ memcpy(buf+len_idx, &t_, 2);
+ } else {
+ APPEND16(item->datalen);
+ if (j+item->datalen > (off_t)buf_len)
+ goto overflow;
+ memcpy(buf+j, item->data, item->datalen);
+ j += item->datalen;
+ }
+ item = item->next;
+ }
+ }
+
+ if (j > 512) {
+overflow:
+ j = 512;
+		buf[2] |= 0x02; /* set the TC (truncated) bit; 512 bytes is the traditional UDP DNS payload limit. */
+ }
+
+ req->response_len = j;
+
+ if (!(req->response = mm_malloc(req->response_len))) {
+ server_request_free_answers(req);
+ dnslabel_clear(&table);
+ return (-1);
+ }
+ memcpy(req->response, buf, req->response_len);
+ server_request_free_answers(req);
+ dnslabel_clear(&table);
+ return (0);
+}
+
+/* exported function */
+int
+evdns_server_request_respond(struct evdns_server_request *req_, int err)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ struct evdns_server_port *port = req->port;
+ int r = -1;
+
+ EVDNS_LOCK(port);
+ if (!req->response) {
+ if ((r = evdns_server_request_format_response(req, err))<0)
+ goto done;
+ }
+
+ r = sendto(port->socket, req->response, (int)req->response_len, 0,
+ (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen);
+ if (r<0) {
+ int sock_err = evutil_socket_geterror(port->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(sock_err))
+ goto done;
+
+ if (port->pending_replies) {
+ req->prev_pending = port->pending_replies->prev_pending;
+ req->next_pending = port->pending_replies;
+ req->prev_pending->next_pending =
+ req->next_pending->prev_pending = req;
+ } else {
+ req->prev_pending = req->next_pending = req;
+ port->pending_replies = req;
+ port->choked = 1;
+
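+	/* Re-register the port's event with EV_WRITE so the queued reply can be flushed once the socket is writable. */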
+ (void) event_del(&port->event);
+ event_assign(&port->event, port->event_base, port->socket, (port->closing?0:EV_READ) | EV_WRITE | EV_PERSIST, server_port_ready_callback, port);
+
+ if (event_add(&port->event, NULL) < 0) {
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server");
+ }
+
+ }
+
+ r = 1;
+ goto done;
+ }
+ if (server_request_free(req)) {
+ r = 0;
+ goto done;
+ }
+
+ if (port->pending_replies)
+ server_port_flush(port);
+
+ r = 0;
+done:
+ EVDNS_UNLOCK(port);
+ return r;
+}
+
+/* Free all storage held by RRs in req. */
+static void
+server_request_free_answers(struct server_request *req)
+{
+ struct server_reply_item *victim, *next, **list;
+ int i;
+ for (i = 0; i < 3; ++i) {
+ if (i==0)
+ list = &req->answer;
+ else if (i==1)
+ list = &req->authority;
+ else
+ list = &req->additional;
+
+ victim = *list;
+ while (victim) {
+ next = victim->next;
+ mm_free(victim->name);
+ if (victim->data)
+ mm_free(victim->data);
+ mm_free(victim);
+ victim = next;
+ }
+ *list = NULL;
+ }
+}
+
+/* Free all storage held by req, and remove links to it. */
+/* return true iff we just wound up freeing the server_port. */
+static int
+server_request_free(struct server_request *req)
+{
+ int i, rc=1, lock=0;
+ if (req->base.questions) {
+ for (i = 0; i < req->base.nquestions; ++i)
+ mm_free(req->base.questions[i]);
+ mm_free(req->base.questions);
+ }
+
+ if (req->port) {
+ EVDNS_LOCK(req->port);
+ lock=1;
+ if (req->port->pending_replies == req) {
+ if (req->next_pending && req->next_pending != req)
+ req->port->pending_replies = req->next_pending;
+ else
+ req->port->pending_replies = NULL;
+ }
+ rc = --req->port->refcnt;
+ }
+
+ if (req->response) {
+ mm_free(req->response);
+ }
+
+ server_request_free_answers(req);
+
+ if (req->next_pending && req->next_pending != req) {
+ req->next_pending->prev_pending = req->prev_pending;
+ req->prev_pending->next_pending = req->next_pending;
+ }
+
+ if (rc == 0) {
+ EVDNS_UNLOCK(req->port); /* ????? nickm */
+ server_port_free(req->port);
+ mm_free(req);
+ return (1);
+ }
+ if (lock)
+ EVDNS_UNLOCK(req->port);
+ mm_free(req);
+ return (0);
+}
+
+/* Free all storage held by an evdns_server_port. Only called when the reference count has dropped to zero. */
+static void
+server_port_free(struct evdns_server_port *port)
+{
+ EVUTIL_ASSERT(port);
+ EVUTIL_ASSERT(!port->refcnt);
+ EVUTIL_ASSERT(!port->pending_replies);
+ if (port->socket > 0) {
+ evutil_closesocket(port->socket);
+ port->socket = -1;
+ }
+ (void) event_del(&port->event);
+ event_debug_unassign(&port->event);
+ EVTHREAD_FREE_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(port);
+}
+
+/* exported function */
+int
+evdns_server_request_drop(struct evdns_server_request *req_)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ server_request_free(req);
+ return 0;
+}
+
+/* exported function */
+int
+evdns_server_request_get_requesting_addr(struct evdns_server_request *req_, struct sockaddr *sa, int addr_len)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ if (addr_len < (int)req->addrlen)
+ return -1;
+ memcpy(sa, &(req->addr), req->addrlen);
+ return req->addrlen;
+}
+
+#undef APPEND16
+#undef APPEND32
+
+/* this is a libevent callback function which is called when a request */
+/* has timed out. */
+static void
+evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg) {
+ struct request *const req = (struct request *) arg;
+ struct evdns_base *base = req->base;
+
+ (void) fd;
+ (void) events;
+
+ log(EVDNS_LOG_DEBUG, "Request %p timed out", arg);
+ EVDNS_LOCK(base);
+
+ if (req->tx_count >= req->base->global_max_retransmits) {
+ struct nameserver *ns = req->ns;
+ /* this request has failed */
+ log(EVDNS_LOG_DEBUG, "Giving up on request %p; tx_count==%d",
+ arg, req->tx_count);
+ reply_schedule_callback(req, 0, DNS_ERR_TIMEOUT, NULL);
+
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ nameserver_failed(ns, "request timed out.");
+ } else {
+ /* retransmit it */
+ log(EVDNS_LOG_DEBUG, "Retransmitting request %p; tx_count==%d",
+ arg, req->tx_count);
+ (void) evtimer_del(&req->timeout_event);
+ request_swap_ns(req, nameserver_pick(base));
+ evdns_request_transmit(req);
+
+ req->ns->timedout++;
+ if (req->ns->timedout > req->base->global_max_nameserver_timeout) {
+ req->ns->timedout = 0;
+ nameserver_failed(req->ns, "request timed out.");
+ }
+ }
+
+ EVDNS_UNLOCK(base);
+}
+
+/* try to send a request to a given server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 temporary failure */
+/* 2 other failure */
+static int
+evdns_request_transmit_to(struct request *req, struct nameserver *server) {
+ int r;
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
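+	/* With disable_when_inactive set, the nameserver's read event is only added once it has a request in flight. */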
+ if (server->requests_inflight == 1 &&
+ req->base->disable_when_inactive &&
+ event_add(&server->event, NULL) < 0) {
+ return 1;
+ }
+
+ r = sendto(server->socket, (void*)req->request, req->request_len, 0,
+ (struct sockaddr *)&server->address, server->addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(server->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return 1;
+ nameserver_failed(req->ns, evutil_socket_error_to_string(err));
+ return 2;
+ } else if (r != (int)req->request_len) {
+ return 1; /* short write */
+ } else {
+ return 0;
+ }
+}
+
+/* try to send a request, updating the fields of the request */
+/* as needed */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 failed */
+static int
+evdns_request_transmit(struct request *req) {
+ int retcode = 0, r;
+
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ /* if we fail to send this packet then this flag marks it */
+ /* for evdns_transmit */
+ req->transmit_me = 1;
+ EVUTIL_ASSERT(req->trans_id != 0xffff);
+
+ if (!req->ns)
+ {
+ /* unable to transmit request if no nameservers */
+ return 1;
+ }
+
+ if (req->ns->choked) {
+ /* don't bother trying to write to a socket */
+ /* which we have had EAGAIN from */
+ return 1;
+ }
+
+ r = evdns_request_transmit_to(req, req->ns);
+ switch (r) {
+ case 1:
+ /* temp failure */
+ req->ns->choked = 1;
+ nameserver_write_waiting(req->ns, 1);
+ return 1;
+ case 2:
+ /* failed to transmit the request entirely. */
+ retcode = 1;
+ /* fall through: we'll set a timeout, which will time out,
+ * and make us retransmit the request anyway. */
+ default:
+ /* all ok */
+ log(EVDNS_LOG_DEBUG,
+ "Setting timeout for request %p, sent to nameserver %p", req, req->ns);
+ if (evtimer_add(&req->timeout_event, &req->base->global_timeout) < 0) {
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer for request %p",
+ req);
+ /* ???? Do more? */
+ }
+ req->tx_count++;
+ req->transmit_me = 0;
+ return retcode;
+ }
+}
+
+static void
+nameserver_probe_callback(int result, char type, int count, int ttl, void *addresses, void *arg) {
+ struct nameserver *const ns = (struct nameserver *) arg;
+ (void) type;
+ (void) count;
+ (void) ttl;
+ (void) addresses;
+
+ if (result == DNS_ERR_CANCEL) {
+ /* We canceled this request because the nameserver came up
+ * for some other reason. Do not change our opinion about
+ * the nameserver. */
+ return;
+ }
+
+ EVDNS_LOCK(ns->base);
+ ns->probe_request = NULL;
+ if (result == DNS_ERR_NONE || result == DNS_ERR_NOTEXIST) {
+ /* this is a good reply */
+ nameserver_up(ns);
+ } else {
+ nameserver_probe_failed(ns);
+ }
+ EVDNS_UNLOCK(ns->base);
+}
+
+static void
+nameserver_send_probe(struct nameserver *const ns) {
+ struct evdns_request *handle;
+ struct request *req;
+ char addrbuf[128];
+ /* here we need to send a probe to a given nameserver */
+ /* in the hope that it is up now. */
+
+ ASSERT_LOCKED(ns->base);
+ log(EVDNS_LOG_DEBUG, "Sending probe to %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ handle = mm_calloc(1, sizeof(*handle));
+ if (!handle) return;
+ req = request_new(ns->base, handle, TYPE_A, "google.com", DNS_QUERY_NO_SEARCH, nameserver_probe_callback, ns);
+ if (!req) {
+ mm_free(handle);
+ return;
+ }
+ ns->probe_request = handle;
+ /* we force this into the inflight queue no matter what */
+ request_trans_id_set(req, transaction_id_pick(ns->base));
+ req->ns = ns;
+ request_submit(req);
+}
+
+/* returns: */
+/* 0 didn't try to transmit anything */
+/* 1 tried to transmit something */
+static int
+evdns_transmit(struct evdns_base *base) {
+ char did_try_to_transmit = 0;
+ int i;
+
+ ASSERT_LOCKED(base);
+ for (i = 0; i < base->n_req_heads; ++i) {
+ if (base->req_heads[i]) {
+ struct request *const started_at = base->req_heads[i], *req = started_at;
+ /* first transmit all the requests which are currently waiting */
+ do {
+ if (req->transmit_me) {
+ did_try_to_transmit = 1;
+ evdns_request_transmit(req);
+ }
+
+ req = req->next;
+ } while (req != started_at);
+ }
+ }
+
+ return did_try_to_transmit;
+}
+
+/* exported function */
+int
+evdns_base_count_nameservers(struct evdns_base *base)
+{
+ const struct nameserver *server;
+ int n = 0;
+
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ if (!server)
+ goto done;
+ do {
+ ++n;
+ server = server->next;
+ } while (server != base->server_head);
+done:
+ EVDNS_UNLOCK(base);
+ return n;
+}
+
+int
+evdns_count_nameservers(void)
+{
+ return evdns_base_count_nameservers(current_base);
+}
+
+/* exported function */
+int
+evdns_base_clear_nameservers_and_suspend(struct evdns_base *base)
+{
+ struct nameserver *server, *started_at;
+ int i;
+
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ started_at = base->server_head;
+ if (!server) {
+ EVDNS_UNLOCK(base);
+ return 0;
+ }
+ while (1) {
+ struct nameserver *next = server->next;
+ (void) event_del(&server->event);
+ if (evtimer_initialized(&server->timeout_event))
+ (void) evtimer_del(&server->timeout_event);
+ if (server->probe_request) {
+ evdns_cancel_request(server->base, server->probe_request);
+ server->probe_request = NULL;
+ }
+ if (server->socket >= 0)
+ evutil_closesocket(server->socket);
+ mm_free(server);
+ if (next == started_at)
+ break;
+ server = next;
+ }
+ base->server_head = NULL;
+ base->global_good_nameservers = 0;
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ struct request *req, *req_started_at;
+ req = req_started_at = base->req_heads[i];
+ while (req) {
+ struct request *next = req->next;
+ req->tx_count = req->reissue_count = 0;
+ req->ns = NULL;
+ /* ???? What to do about searches? */
+ (void) evtimer_del(&req->timeout_event);
+ req->trans_id = 0;
+ req->transmit_me = 0;
+
+ base->global_requests_waiting++;
+ evdns_request_insert(req, &base->req_waiting_head);
+ /* We want to insert these suspended elements at the front of
+ * the waiting queue, since they were pending before any of
+ * the waiting entries were added. This is a circular list,
+ * so we can just shift the start back by one.*/
+ base->req_waiting_head = base->req_waiting_head->prev;
+
+ if (next == req_started_at)
+ break;
+ req = next;
+ }
+ base->req_heads[i] = NULL;
+ }
+
+ base->global_requests_inflight = 0;
+
+ EVDNS_UNLOCK(base);
+ return 0;
+}
+
+int
+evdns_clear_nameservers_and_suspend(void)
+{
+ return evdns_base_clear_nameservers_and_suspend(current_base);
+}
+
+
+/* exported function */
+int
+evdns_base_resume(struct evdns_base *base)
+{
+ EVDNS_LOCK(base);
+ evdns_requests_pump_waiting_queue(base);
+ EVDNS_UNLOCK(base);
+
+ return 0;
+}
+
+int
+evdns_resume(void)
+{
+ return evdns_base_resume(current_base);
+}
+
+static int
+evdns_nameserver_add_impl_(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
+ /* first check to see if we already have this nameserver */
+
+ const struct nameserver *server = base->server_head, *const started_at = base->server_head;
+ struct nameserver *ns;
+ int err = 0;
+ char addrbuf[128];
+
+ ASSERT_LOCKED(base);
+ if (server) {
+ do {
+ if (!evutil_sockaddr_cmp((struct sockaddr*)&server->address, address, 1)) return 3;
+ server = server->next;
+ } while (server != started_at);
+ }
+ if (addrlen > (int)sizeof(ns->address)) {
+ log(EVDNS_LOG_DEBUG, "Addrlen %d too long.", (int)addrlen);
+ return 2;
+ }
+
+ ns = (struct nameserver *) mm_malloc(sizeof(struct nameserver));
+ if (!ns) return -1;
+
+ memset(ns, 0, sizeof(struct nameserver));
+ ns->base = base;
+
+ evtimer_assign(&ns->timeout_event, ns->base->event_base, nameserver_prod_callback, ns);
+
+ ns->socket = evutil_socket_(address->sa_family,
+ SOCK_DGRAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0);
+ if (ns->socket < 0) { err = 1; goto out1; }
+
+ if (base->global_outgoing_addrlen &&
+ !evutil_sockaddr_is_loopback_(address)) {
+ if (bind(ns->socket,
+ (struct sockaddr*)&base->global_outgoing_address,
+ base->global_outgoing_addrlen) < 0) {
+ log(EVDNS_LOG_WARN,"Couldn't bind to outgoing address");
+ err = 2;
+ goto out2;
+ }
+ }
+
+ memcpy(&ns->address, address, addrlen);
+ ns->addrlen = addrlen;
+ ns->state = 1;
+ event_assign(&ns->event, ns->base->event_base, ns->socket,
+ EV_READ | EV_PERSIST, nameserver_ready_callback, ns);
+ if (!base->disable_when_inactive && event_add(&ns->event, NULL) < 0) {
+ err = 2;
+ goto out2;
+ }
+
+ log(EVDNS_LOG_DEBUG, "Added nameserver %s as %p",
+ evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), ns);
+
+ /* insert this nameserver into the list of them */
+ if (!base->server_head) {
+ ns->next = ns->prev = ns;
+ base->server_head = ns;
+ } else {
+ ns->next = base->server_head->next;
+ ns->prev = base->server_head;
+ base->server_head->next = ns;
+ ns->next->prev = ns;
+ }
+
+ base->global_good_nameservers++;
+
+ return 0;
+
+out2:
+ evutil_closesocket(ns->socket);
+out1:
+ event_debug_unassign(&ns->event);
+ mm_free(ns);
+ log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d",
+ evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), err);
+ return err;
+}
+
+/* exported function */
+int
+evdns_base_nameserver_add(struct evdns_base *base, unsigned long int address)
+{
+ struct sockaddr_in sin;
+ int res;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_addr.s_addr = address;
+ sin.sin_port = htons(53);
+ sin.sin_family = AF_INET;
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, (struct sockaddr*)&sin, sizeof(sin));
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+int
+evdns_nameserver_add(unsigned long int address) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_nameserver_add(current_base, address);
+}
+
+static void
+sockaddr_setport(struct sockaddr *sa, ev_uint16_t port)
+{
+ if (sa->sa_family == AF_INET) {
+ ((struct sockaddr_in *)sa)->sin_port = htons(port);
+ } else if (sa->sa_family == AF_INET6) {
+ ((struct sockaddr_in6 *)sa)->sin6_port = htons(port);
+ }
+}
+
+static ev_uint16_t
+sockaddr_getport(struct sockaddr *sa)
+{
+ if (sa->sa_family == AF_INET) {
+ return ntohs(((struct sockaddr_in *)sa)->sin_port);
+ } else if (sa->sa_family == AF_INET6) {
+ return ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
+ } else {
+ return 0;
+ }
+}
+
+/* exported function */
+int
+evdns_base_nameserver_ip_add(struct evdns_base *base, const char *ip_as_string) {
+ struct sockaddr_storage ss;
+ struct sockaddr *sa;
+ int len = sizeof(ss);
+ int res;
+ if (evutil_parse_sockaddr_port(ip_as_string, (struct sockaddr *)&ss,
+ &len)) {
+ log(EVDNS_LOG_WARN, "Unable to parse nameserver address %s",
+ ip_as_string);
+ return 4;
+ }
+ sa = (struct sockaddr *) &ss;
+ if (sockaddr_getport(sa) == 0)
+ sockaddr_setport(sa, 53);
+
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, sa, len);
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+int
+evdns_nameserver_ip_add(const char *ip_as_string) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_nameserver_ip_add(current_base, ip_as_string);
+}
+
+int
+evdns_base_nameserver_sockaddr_add(struct evdns_base *base,
+ const struct sockaddr *sa, ev_socklen_t len, unsigned flags)
+{
+ int res;
+ EVUTIL_ASSERT(base);
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, sa, len);
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+int
+evdns_base_get_nameserver_addr(struct evdns_base *base, int idx,
+ struct sockaddr *sa, ev_socklen_t len)
+{
+ int result = -1;
+ int i;
+ struct nameserver *server;
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ for (i = 0; i < idx && server; ++i, server = server->next) {
+ if (server->next == base->server_head)
+ goto done;
+ }
+ if (! server)
+ goto done;
+
+ if (server->addrlen > len) {
+ result = (int) server->addrlen;
+ goto done;
+ }
+
+ memcpy(sa, &server->address, server->addrlen);
+ result = (int) server->addrlen;
+done:
+ EVDNS_UNLOCK(base);
+ return result;
+}
+
+/* remove from the queue */
+static void
+evdns_request_remove(struct request *req, struct request **head)
+{
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
+#if 0
+ {
+ struct request *ptr;
+ int found = 0;
+ EVUTIL_ASSERT(*head != NULL);
+
+ ptr = *head;
+ do {
+ if (ptr == req) {
+ found = 1;
+ break;
+ }
+ ptr = ptr->next;
+ } while (ptr != *head);
+ EVUTIL_ASSERT(found);
+
+ EVUTIL_ASSERT(req->next);
+ }
+#endif
+
+ if (req->next == req) {
+ /* only item in the list */
+ *head = NULL;
+ } else {
+ req->next->prev = req->prev;
+ req->prev->next = req->next;
+ if (*head == req) *head = req->next;
+ }
+ req->next = req->prev = NULL;
+}
+
+/* insert into the tail of the queue */
+static void
+evdns_request_insert(struct request *req, struct request **head) {
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ if (!*head) {
+ *head = req;
+ req->next = req->prev = req;
+ return;
+ }
+
+ req->prev = (*head)->prev;
+ req->prev->next = req;
+ req->next = *head;
+ (*head)->prev = req;
+}
+
+static int
+string_num_dots(const char *s) {
+ int count = 0;
+ while ((s = strchr(s, '.'))) {
+ s++;
+ count++;
+ }
+ return count;
+}
+
+static struct request *
+request_new(struct evdns_base *base, struct evdns_request *handle, int type,
+ const char *name, int flags, evdns_callback_type callback,
+ void *user_ptr) {
+
+ const char issuing_now =
+ (base->global_requests_inflight < base->global_max_requests_inflight) ? 1 : 0;
+
+ const size_t name_len = strlen(name);
+ const size_t request_max_len = evdns_request_len(name_len);
+ const u16 trans_id = issuing_now ? transaction_id_pick(base) : 0xffff;
+ /* the request data is alloced in a single block with the header */
+ struct request *const req =
+ mm_malloc(sizeof(struct request) + request_max_len);
+ int rlen;
+ char namebuf[256];
+ (void) flags;
+
+ ASSERT_LOCKED(base);
+
+ if (!req) return NULL;
+
+ if (name_len >= sizeof(namebuf)) {
+ mm_free(req);
+ return NULL;
+ }
+
+ memset(req, 0, sizeof(struct request));
+ req->base = base;
+
+ evtimer_assign(&req->timeout_event, req->base->event_base, evdns_request_timeout_callback, req);
+
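+	/* Optionally apply the "0x20" anti-spoofing trick: randomize the case of
+	 * each letter in the query name. A legitimate server echoes the question
+	 * back with the same case, giving off-path spoofers extra bits to guess. */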
+ if (base->global_randomize_case) {
+ unsigned i;
+ char randbits[(sizeof(namebuf)+7)/8];
+ strlcpy(namebuf, name, sizeof(namebuf));
+ evutil_secure_rng_get_bytes(randbits, (name_len+7)/8);
+ for (i = 0; i < name_len; ++i) {
+ if (EVUTIL_ISALPHA_(namebuf[i])) {
+ if ((randbits[i >> 3] & (1<<(i & 7))))
+ namebuf[i] |= 0x20;
+ else
+ namebuf[i] &= ~0x20;
+ }
+ }
+ name = namebuf;
+ }
+
+ /* request data lives just after the header */
+ req->request = ((u8 *) req) + sizeof(struct request);
+ /* denotes that the request data shouldn't be free()ed */
+ req->request_appended = 1;
+ rlen = evdns_request_data_build(name, name_len, trans_id,
+ type, CLASS_INET, req->request, request_max_len);
+ if (rlen < 0)
+ goto err1;
+
+ req->request_len = rlen;
+ req->trans_id = trans_id;
+ req->tx_count = 0;
+ req->request_type = type;
+ req->user_pointer = user_ptr;
+ req->user_callback = callback;
+ req->ns = issuing_now ? nameserver_pick(base) : NULL;
+ req->next = req->prev = NULL;
+ req->handle = handle;
+ if (handle) {
+ handle->current_req = req;
+ handle->base = base;
+ }
+
+ return req;
+err1:
+ mm_free(req);
+ return NULL;
+}
+
+static void
+request_submit(struct request *const req) {
+ struct evdns_base *base = req->base;
+ ASSERT_LOCKED(base);
+ ASSERT_VALID_REQUEST(req);
+ if (req->ns) {
+ /* if it has a nameserver assigned then this is going */
+ /* straight into the inflight queue */
+ evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
+
+ base->global_requests_inflight++;
+ req->ns->requests_inflight++;
+
+ evdns_request_transmit(req);
+ } else {
+ evdns_request_insert(req, &base->req_waiting_head);
+ base->global_requests_waiting++;
+ }
+}
+
+/* exported function */
+void
+evdns_cancel_request(struct evdns_base *base, struct evdns_request *handle)
+{
+ struct request *req;
+
+ if (!handle->current_req)
+ return;
+
+ if (!base) {
+ /* This redundancy is silly; can we fix it? (Not for 2.0) XXXX */
+ base = handle->base;
+ if (!base)
+ base = handle->current_req->base;
+ }
+
+ EVDNS_LOCK(base);
+ if (handle->pending_cb) {
+ EVDNS_UNLOCK(base);
+ return;
+ }
+
+ req = handle->current_req;
+ ASSERT_VALID_REQUEST(req);
+
+ reply_schedule_callback(req, 0, DNS_ERR_CANCEL, NULL);
+ if (req->ns) {
+ /* remove from inflight queue */
+ request_finished(req, &REQ_HEAD(base, req->trans_id), 1);
+ } else {
+ /* remove from global_waiting head */
+ request_finished(req, &base->req_waiting_head, 1);
+ }
+ EVDNS_UNLOCK(base);
+}
+
+/* exported function */
+struct evdns_request *
+evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ struct evdns_request *handle;
+ struct request *req;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ EVDNS_LOCK(base);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ req =
+ request_new(base, handle, TYPE_A, name, flags,
+ callback, ptr);
+ if (req)
+ request_submit(req);
+ } else {
+ search_request_new(base, handle, TYPE_A, name, flags,
+ callback, ptr);
+ }
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return handle;
+}
+
+int evdns_resolve_ipv4(const char *name, int flags,
+ evdns_callback_type callback, void *ptr)
+{
+ return evdns_base_resolve_ipv4(current_base, name, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+
+/* exported function */
+struct evdns_request *
+evdns_base_resolve_ipv6(struct evdns_base *base,
+ const char *name, int flags,
+ evdns_callback_type callback, void *ptr)
+{
+ struct evdns_request *handle;
+ struct request *req;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ EVDNS_LOCK(base);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ req = request_new(base, handle, TYPE_AAAA, name, flags,
+ callback, ptr);
+ if (req)
+ request_submit(req);
+ } else {
+ search_request_new(base, handle, TYPE_AAAA, name, flags,
+ callback, ptr);
+ }
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return handle;
+}
+
+int evdns_resolve_ipv6(const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_ipv6(current_base, name, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+struct evdns_request *
+evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ char buf[32];
+ struct evdns_request *handle;
+ struct request *req;
+ u32 a;
+ EVUTIL_ASSERT(in);
+ a = ntohl(in->s_addr);
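+	/* in-addr.arpa PTR names list the octets in reverse order: d.c.b.a.in-addr.arpa for a.b.c.d. */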
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ EVDNS_LOCK(base);
+ req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr);
+ if (req)
+ request_submit(req);
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return (handle);
+}
+
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_reverse(current_base, in, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+struct evdns_request *
+evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ /* 32 nybbles, 32 periods, "ip6.arpa", NUL. */
+ char buf[73];
+ char *cp;
+ struct evdns_request *handle;
+ struct request *req;
+ int i;
+ EVUTIL_ASSERT(in);
+ cp = buf;
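+	/* ip6.arpa PTR names list the 32 nybbles of the address in reverse order, low nybble of the last byte first. */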
+ for (i=15; i >= 0; --i) {
+ u8 byte = in->s6_addr[i];
+ *cp++ = "0123456789abcdef"[byte & 0x0f];
+ *cp++ = '.';
+ *cp++ = "0123456789abcdef"[byte >> 4];
+ *cp++ = '.';
+ }
+ EVUTIL_ASSERT(cp + strlen("ip6.arpa") < buf+sizeof(buf));
+ memcpy(cp, "ip6.arpa", strlen("ip6.arpa")+1);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ EVDNS_LOCK(base);
+ req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr);
+ if (req)
+ request_submit(req);
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return (handle);
+}
+
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_reverse_ipv6(current_base, in, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+/* ================================================================= */
+/* Search support */
+/* */
+/* the libc resolver has support for searching a number of domains */
+/* to find a name. Failing anything else, it takes the single domain */
+/* from the gethostname() call. */
+/* */
+/* It can also be configured via the domain and search options in a */
+/* resolv.conf. */
+/* */
+/* The ndots option controls how many dots it takes for the resolver */
+/* to decide that a name is non-local and so try a raw lookup first. */
+
+struct search_domain {
+ int len;
+ struct search_domain *next;
+ /* the text string is appended to this structure */
+};
+
+struct search_state {
+ int refcount;
+ int ndots;
+ int num_domains;
+ struct search_domain *head;
+};
+
+static void
+search_state_decref(struct search_state *const state) {
+ if (!state) return;
+ state->refcount--;
+ if (!state->refcount) {
+ struct search_domain *next, *dom;
+ for (dom = state->head; dom; dom = next) {
+ next = dom->next;
+ mm_free(dom);
+ }
+ mm_free(state);
+ }
+}
+
+static struct search_state *
+search_state_new(void) {
+ struct search_state *state = (struct search_state *) mm_malloc(sizeof(struct search_state));
+ if (!state) return NULL;
+ memset(state, 0, sizeof(struct search_state));
+ state->refcount = 1;
+ state->ndots = 1;
+
+ return state;
+}
+
+static void
+search_postfix_clear(struct evdns_base *base) {
+ search_state_decref(base->global_search_state);
+
+ base->global_search_state = search_state_new();
+}
+
+/* exported function */
+void
+evdns_base_search_clear(struct evdns_base *base)
+{
+ EVDNS_LOCK(base);
+ search_postfix_clear(base);
+ EVDNS_UNLOCK(base);
+}
+
+void
+evdns_search_clear(void) {
+ evdns_base_search_clear(current_base);
+}
+
+static void
+search_postfix_add(struct evdns_base *base, const char *domain) {
+ size_t domain_len;
+ struct search_domain *sdomain;
+ while (domain[0] == '.') domain++;
+ domain_len = strlen(domain);
+
+ ASSERT_LOCKED(base);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (!base->global_search_state) return;
+ base->global_search_state->num_domains++;
+
+ sdomain = (struct search_domain *) mm_malloc(sizeof(struct search_domain) + domain_len);
+ if (!sdomain) return;
+ memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len);
+ sdomain->next = base->global_search_state->head;
+ sdomain->len = (int) domain_len;
+
+ base->global_search_state->head = sdomain;
+}
+
+/* reverse the order of members in the postfix list. This is needed because, */
+/* when parsing resolv.conf, we push elements in the wrong order. */
+static void
+search_reverse(struct evdns_base *base) {
+ struct search_domain *cur, *prev = NULL, *next;
+ ASSERT_LOCKED(base);
+ cur = base->global_search_state->head;
+ while (cur) {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ cur = next;
+ }
+
+ base->global_search_state->head = prev;
+}
+
+/* exported function */
+void
+evdns_base_search_add(struct evdns_base *base, const char *domain) {
+ EVDNS_LOCK(base);
+ search_postfix_add(base, domain);
+ EVDNS_UNLOCK(base);
+}
+void
+evdns_search_add(const char *domain) {
+ evdns_base_search_add(current_base, domain);
+}
+
+/* exported function */
+void
+evdns_base_search_ndots_set(struct evdns_base *base, const int ndots) {
+ EVDNS_LOCK(base);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (base->global_search_state)
+ base->global_search_state->ndots = ndots;
+ EVDNS_UNLOCK(base);
+}
+void
+evdns_search_ndots_set(const int ndots) {
+ evdns_base_search_ndots_set(current_base, ndots);
+}
+
+static void
+search_set_from_hostname(struct evdns_base *base) {
+ char hostname[HOST_NAME_MAX + 1], *domainname;
+
+ ASSERT_LOCKED(base);
+ search_postfix_clear(base);
+ if (gethostname(hostname, sizeof(hostname))) return;
+ domainname = strchr(hostname, '.');
+ if (!domainname) return;
+ search_postfix_add(base, domainname);
+}
+
+/* warning: returns malloced string */
+static char *
+search_make_new(const struct search_state *const state, int n, const char *const base_name) {
+ const size_t base_len = strlen(base_name);
+ const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
+ struct search_domain *dom;
+
+ for (dom = state->head; dom; dom = dom->next) {
+ if (!n--) {
+ /* this is the postfix we want */
+ /* the actual postfix string is kept at the end of the structure */
+ const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain);
+ const int postfix_len = dom->len;
+ char *const newname = (char *) mm_malloc(base_len + need_to_append_dot + postfix_len + 1);
+ if (!newname) return NULL;
+ memcpy(newname, base_name, base_len);
+ if (need_to_append_dot) newname[base_len] = '.';
+ memcpy(newname + base_len + need_to_append_dot, postfix, postfix_len);
+ newname[base_len + need_to_append_dot + postfix_len] = 0;
+ return newname;
+ }
+ }
+
+ /* we ran off the end of the list and still didn't find the requested string */
+ EVUTIL_ASSERT(0);
+ return NULL; /* unreachable; stops warnings in some compilers. */
+}
+
+static struct request *
+search_request_new(struct evdns_base *base, struct evdns_request *handle,
+ int type, const char *const name, int flags,
+ evdns_callback_type user_callback, void *user_arg) {
+ ASSERT_LOCKED(base);
+ EVUTIL_ASSERT(type == TYPE_A || type == TYPE_AAAA);
+ EVUTIL_ASSERT(handle->current_req == NULL);
+ if ( ((flags & DNS_QUERY_NO_SEARCH) == 0) &&
+ base->global_search_state &&
+ base->global_search_state->num_domains) {
+ /* we have some domains to search */
+ struct request *req;
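+		/* Names with at least 'ndots' dots are queried as-is first; shorter names start with the first search postfix appended. */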
+ if (string_num_dots(name) >= base->global_search_state->ndots) {
+ req = request_new(base, handle, type, name, flags, user_callback, user_arg);
+ if (!req) return NULL;
+ handle->search_index = -1;
+ } else {
+ char *const new_name = search_make_new(base->global_search_state, 0, name);
+ if (!new_name) return NULL;
+ req = request_new(base, handle, type, new_name, flags, user_callback, user_arg);
+ mm_free(new_name);
+ if (!req) return NULL;
+ handle->search_index = 0;
+ }
+ EVUTIL_ASSERT(handle->search_origname == NULL);
+ handle->search_origname = mm_strdup(name);
+ if (handle->search_origname == NULL) {
+ /* XXX Should we dealloc req? If yes, how? */
+ if (req)
+ mm_free(req);
+ return NULL;
+ }
+ handle->search_state = base->global_search_state;
+ handle->search_flags = flags;
+ base->global_search_state->refcount++;
+ request_submit(req);
+ return req;
+ } else {
+ struct request *const req = request_new(base, handle, type, name, flags, user_callback, user_arg);
+ if (!req) return NULL;
+ request_submit(req);
+ return req;
+ }
+}
+
+/* this is called when a request has failed to find a name. We need to check */
+/* if it is part of a search and, if so, try the next name in the list */
+/* returns: */
+/* 0 another request has been submitted */
+/* 1 no more requests needed */
+static int
+search_try_next(struct evdns_request *const handle) {
+ struct request *req = handle->current_req;
+ struct evdns_base *base = req->base;
+ struct request *newreq;
+ ASSERT_LOCKED(base);
+ if (handle->search_state) {
+ /* it is part of a search */
+ char *new_name;
+ handle->search_index++;
+ if (handle->search_index >= handle->search_state->num_domains) {
+ /* no more postfixes to try, however we may need to try */
+ /* this name without a postfix */
+ if (string_num_dots(handle->search_origname) < handle->search_state->ndots) {
+ /* yep, we need to try it raw */
+ newreq = request_new(base, NULL, req->request_type, handle->search_origname, handle->search_flags, req->user_callback, req->user_pointer);
+ log(EVDNS_LOG_DEBUG, "Search: trying raw query %s", handle->search_origname);
+ if (newreq) {
+ search_request_finished(handle);
+ goto submit_next;
+ }
+ }
+ return 1;
+ }
+
+ new_name = search_make_new(handle->search_state, handle->search_index, handle->search_origname);
+ if (!new_name) return 1;
+ log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, handle->search_index);
+ newreq = request_new(base, NULL, req->request_type, new_name, handle->search_flags, req->user_callback, req->user_pointer);
+ mm_free(new_name);
+ if (!newreq) return 1;
+ goto submit_next;
+ }
+ return 1;
+
+submit_next:
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 0);
+ handle->current_req = newreq;
+ newreq->handle = handle;
+ request_submit(newreq);
+ return 0;
+}
+
+static void
+search_request_finished(struct evdns_request *const handle) {
+ ASSERT_LOCKED(handle->current_req->base);
+ if (handle->search_state) {
+ search_state_decref(handle->search_state);
+ handle->search_state = NULL;
+ }
+ if (handle->search_origname) {
+ mm_free(handle->search_origname);
+ handle->search_origname = NULL;
+ }
+}
+
+/* ================================================================= */
+/* Parsing resolv.conf files */
+
+static void
+evdns_resolv_set_defaults(struct evdns_base *base, int flags) {
+ /* if the file isn't found then we assume a local resolver */
+ ASSERT_LOCKED(base);
+ if (flags & DNS_OPTION_SEARCH) search_set_from_hostname(base);
+ if (flags & DNS_OPTION_NAMESERVERS) evdns_base_nameserver_ip_add(base,"127.0.0.1");
+}
+
+#ifndef EVENT__HAVE_STRTOK_R
+static char *
+strtok_r(char *s, const char *delim, char **state) {
+ char *cp, *start;
+ start = cp = s ? s : *state;
+ if (!cp)
+ return NULL;
+ while (*cp && !strchr(delim, *cp))
+ ++cp;
+ if (!*cp) {
+ if (cp == start)
+ return NULL;
+ *state = NULL;
+ return start;
+ } else {
+ *cp++ = '\0';
+ *state = cp;
+ return start;
+ }
+}
+#endif
+
+/* helper version of atoi which returns -1 on error */
+static int
+strtoint(const char *const str)
+{
+ char *endptr;
+ const int r = strtol(str, &endptr, 10);
+ if (*endptr) return -1;
+ return r;
+}
+
+/* Parse a number of seconds into a timeval; return -1 on error. */
+static int
+evdns_strtotimeval(const char *const str, struct timeval *out)
+{
+ double d;
+ char *endptr;
+ d = strtod(str, &endptr);
+ if (*endptr) return -1;
+ if (d < 0) return -1;
+ out->tv_sec = (int) d;
+ out->tv_usec = (int) ((d - (int) d)*1000000);
+ if (out->tv_sec == 0 && out->tv_usec < 1000) /* less than 1 msec */
+ return -1;
+ return 0;
+}
+
+/* helper version of atoi that returns -1 on error and clips to bounds. */
+static int
+strtoint_clipped(const char *const str, int min, int max)
+{
+ int r = strtoint(str);
+ if (r == -1)
+ return r;
+ else if (r<min)
+ return min;
+ else if (r>max)
+ return max;
+ else
+ return r;
+}
+
+static int
+evdns_base_set_max_requests_inflight(struct evdns_base *base, int maxinflight)
+{
+ int old_n_heads = base->n_req_heads, n_heads;
+ struct request **old_heads = base->req_heads, **new_heads, *req;
+ int i;
+
+ ASSERT_LOCKED(base);
+ if (maxinflight < 1)
+ maxinflight = 1;
+ n_heads = (maxinflight+4) / 5;
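+	/* one request-list bucket for every five allowed inflight requests, rounded up */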
+ EVUTIL_ASSERT(n_heads > 0);
+ new_heads = mm_calloc(n_heads, sizeof(struct request*));
+ if (!new_heads)
+ return (-1);
+ if (old_heads) {
+ for (i = 0; i < old_n_heads; ++i) {
+ while (old_heads[i]) {
+ req = old_heads[i];
+ evdns_request_remove(req, &old_heads[i]);
+ evdns_request_insert(req, &new_heads[req->trans_id % n_heads]);
+ }
+ }
+ mm_free(old_heads);
+ }
+ base->req_heads = new_heads;
+ base->n_req_heads = n_heads;
+ base->global_max_requests_inflight = maxinflight;
+ return (0);
+}
+
+/* exported function */
+int
+evdns_base_set_option(struct evdns_base *base,
+ const char *option, const char *val)
+{
+ int res;
+ EVDNS_LOCK(base);
+ res = evdns_base_set_option_impl(base, option, val, DNS_OPTIONS_ALL);
+ EVDNS_UNLOCK(base);
+ return res;
+}
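
Aside (editor's sketch, not part of this patch): a minimal illustration of the exported setter above. The option names are the ones handled by evdns_base_set_option_impl() further down; the values are arbitrary examples.

#include <event2/dns.h>

/* Sketch: tune an existing evdns_base via string options. */
static void tune_resolver(struct evdns_base *dns)
{
	evdns_base_set_option(dns, "timeout", "3.0");       /* per-request timeout, parsed by evdns_strtotimeval() */
	evdns_base_set_option(dns, "max-inflight", "128");  /* resizes the request heads */
	evdns_base_set_option(dns, "attempts", "2");        /* retransmit budget, clipped to 255 */
	evdns_base_set_option(dns, "randomize-case", "0");  /* disable query-name case randomization */
}
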
+
+static inline int
+str_matches_option(const char *s1, const char *optionname)
+{
+	/* Option names are given as "option:". We accept either 'option' in
+ * s1, or 'option:randomjunk'. The latter form is to implement the
+ * resolv.conf parser. */
+ size_t optlen = strlen(optionname);
+ size_t slen = strlen(s1);
+ if (slen == optlen || slen == optlen - 1)
+ return !strncmp(s1, optionname, slen);
+ else if (slen > optlen)
+ return !strncmp(s1, optionname, optlen);
+ else
+ return 0;
+}
+
+static int
+evdns_base_set_option_impl(struct evdns_base *base,
+ const char *option, const char *val, int flags)
+{
+ ASSERT_LOCKED(base);
+ if (str_matches_option(option, "ndots:")) {
+ const int ndots = strtoint(val);
+ if (ndots == -1) return -1;
+ if (!(flags & DNS_OPTION_SEARCH)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting ndots to %d", ndots);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (!base->global_search_state) return -1;
+ base->global_search_state->ndots = ndots;
+ } else if (str_matches_option(option, "timeout:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting timeout to %s", val);
+ memcpy(&base->global_timeout, &tv, sizeof(struct timeval));
+ } else if (str_matches_option(option, "getaddrinfo-allow-skew:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting getaddrinfo-allow-skew to %s",
+ val);
+ memcpy(&base->global_getaddrinfo_allow_skew, &tv,
+ sizeof(struct timeval));
+ } else if (str_matches_option(option, "max-timeouts:")) {
+ const int maxtimeout = strtoint_clipped(val, 1, 255);
+ if (maxtimeout == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting maximum allowed timeouts to %d",
+ maxtimeout);
+ base->global_max_nameserver_timeout = maxtimeout;
+ } else if (str_matches_option(option, "max-inflight:")) {
+ const int maxinflight = strtoint_clipped(val, 1, 65000);
+ if (maxinflight == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d",
+ maxinflight);
+ evdns_base_set_max_requests_inflight(base, maxinflight);
+ } else if (str_matches_option(option, "attempts:")) {
+ int retries = strtoint(val);
+ if (retries == -1) return -1;
+ if (retries > 255) retries = 255;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting retries to %d", retries);
+ base->global_max_retransmits = retries;
+ } else if (str_matches_option(option, "randomize-case:")) {
+ int randcase = strtoint(val);
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ base->global_randomize_case = randcase;
+ } else if (str_matches_option(option, "bind-to:")) {
+ /* XXX This only applies to successive nameservers, not
+ * to already-configured ones. We might want to fix that. */
+ int len = sizeof(base->global_outgoing_address);
+ if (!(flags & DNS_OPTION_NAMESERVERS)) return 0;
+ if (evutil_parse_sockaddr_port(val,
+ (struct sockaddr*)&base->global_outgoing_address, &len))
+ return -1;
+ base->global_outgoing_addrlen = len;
+ } else if (str_matches_option(option, "initial-probe-timeout:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (tv.tv_sec > 3600)
+ tv.tv_sec = 3600;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting initial probe timeout to %s",
+ val);
+ memcpy(&base->global_nameserver_probe_initial_timeout, &tv,
+ sizeof(tv));
+ }
+ return 0;
+}
+
+int
+evdns_set_option(const char *option, const char *val, int flags)
+{
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_set_option(current_base, option, val);
+}
+
+static void
+resolv_conf_parse_line(struct evdns_base *base, char *const start, int flags) {
+ char *strtok_state;
+ static const char *const delims = " \t";
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+
+ char *const first_token = strtok_r(start, delims, &strtok_state);
+ ASSERT_LOCKED(base);
+ if (!first_token) return;
+
+ if (!strcmp(first_token, "nameserver") && (flags & DNS_OPTION_NAMESERVERS)) {
+ const char *const nameserver = NEXT_TOKEN;
+
+ if (nameserver)
+ evdns_base_nameserver_ip_add(base, nameserver);
+ } else if (!strcmp(first_token, "domain") && (flags & DNS_OPTION_SEARCH)) {
+ const char *const domain = NEXT_TOKEN;
+ if (domain) {
+ search_postfix_clear(base);
+ search_postfix_add(base, domain);
+ }
+ } else if (!strcmp(first_token, "search") && (flags & DNS_OPTION_SEARCH)) {
+ const char *domain;
+ search_postfix_clear(base);
+
+ while ((domain = NEXT_TOKEN)) {
+ search_postfix_add(base, domain);
+ }
+ search_reverse(base);
+ } else if (!strcmp(first_token, "options")) {
+ const char *option;
+ while ((option = NEXT_TOKEN)) {
+ const char *val = strchr(option, ':');
+ evdns_base_set_option_impl(base, option, val ? val+1 : "", flags);
+ }
+ }
+#undef NEXT_TOKEN
+}
+
+/* exported function */
+/* returns: */
+/* 0 no errors */
+/* 1 failed to open file */
+/* 2 failed to stat file */
+/* 3 file too large */
+/* 4 out of memory */
+/* 5 short read from file */
+int
+evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename) {
+ int res;
+ EVDNS_LOCK(base);
+ res = evdns_base_resolv_conf_parse_impl(base, flags, filename);
+ EVDNS_UNLOCK(base);
+ return res;
+}
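
Aside (editor's sketch, not part of this patch): calling the parser above with the same flags and path that evdns_base_new() uses later in this file. The non-zero return codes follow the table in the comment.

#include <stdio.h>
#include <event2/dns.h>

static void load_resolv_conf(struct evdns_base *dns)
{
	int r = evdns_base_resolv_conf_parse(dns, DNS_OPTIONS_ALL, "/etc/resolv.conf");
	if (r == 1)
		fprintf(stderr, "no resolv.conf; 127.0.0.1 assumed as resolver\n");
	else if (r != 0)
		fprintf(stderr, "resolv.conf parse failed, code %d\n", r);
}
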
+
+static char *
+evdns_get_default_hosts_filename(void)
+{
+#ifdef _WIN32
+ /* Windows is a little coy about where it puts its configuration
+ * files. Sure, they're _usually_ in C:\windows\system32, but
+ * there's no reason in principle they couldn't be in
+ * W:\hoboken chicken emergency\
+ */
+ char path[MAX_PATH+1];
+ static const char hostfile[] = "\\drivers\\etc\\hosts";
+ char *path_out;
+ size_t len_out;
+
+ if (! SHGetSpecialFolderPathA(NULL, path, CSIDL_SYSTEM, 0))
+ return NULL;
+ len_out = strlen(path)+strlen(hostfile)+1;
+	path_out = mm_malloc(len_out);
+	if (!path_out)
+		return NULL;
+	evutil_snprintf(path_out, len_out, "%s%s", path, hostfile);
+ return path_out;
+#else
+ return mm_strdup("/etc/hosts");
+#endif
+}
+
+static int
+evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename) {
+ size_t n;
+ char *resolv;
+ char *start;
+ int err = 0;
+
+ log(EVDNS_LOG_DEBUG, "Parsing resolv.conf file %s", filename);
+
+ if (flags & DNS_OPTION_HOSTSFILE) {
+ char *fname = evdns_get_default_hosts_filename();
+ evdns_base_load_hosts(base, fname);
+ if (fname)
+ mm_free(fname);
+ }
+
+ if ((err = evutil_read_file_(filename, &resolv, &n, 0)) < 0) {
+ if (err == -1) {
+ /* No file. */
+ evdns_resolv_set_defaults(base, flags);
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+
+ start = resolv;
+ for (;;) {
+ char *const newline = strchr(start, '\n');
+ if (!newline) {
+ resolv_conf_parse_line(base, start, flags);
+ break;
+ } else {
+ *newline = 0;
+ resolv_conf_parse_line(base, start, flags);
+ start = newline + 1;
+ }
+ }
+
+ if (!base->server_head && (flags & DNS_OPTION_NAMESERVERS)) {
+ /* no nameservers were configured. */
+ evdns_base_nameserver_ip_add(base, "127.0.0.1");
+ err = 6;
+ }
+ if (flags & DNS_OPTION_SEARCH && (!base->global_search_state || base->global_search_state->num_domains == 0)) {
+ search_set_from_hostname(base);
+ }
+
+ mm_free(resolv);
+ return err;
+}
+
+int
+evdns_resolv_conf_parse(int flags, const char *const filename) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_resolv_conf_parse(current_base, flags, filename);
+}
+
+
+#ifdef _WIN32
+/* Add multiple nameservers from a space-or-comma-separated list. */
+static int
+evdns_nameserver_ip_add_line(struct evdns_base *base, const char *ips) {
+ const char *addr;
+ char *buf;
+ int r;
+ ASSERT_LOCKED(base);
+ while (*ips) {
+ while (isspace(*ips) || *ips == ',' || *ips == '\t')
+ ++ips;
+ addr = ips;
+ while (isdigit(*ips) || *ips == '.' || *ips == ':' ||
+ *ips=='[' || *ips==']')
+ ++ips;
+ buf = mm_malloc(ips-addr+1);
+ if (!buf) return 4;
+ memcpy(buf, addr, ips-addr);
+ buf[ips-addr] = '\0';
+ r = evdns_base_nameserver_ip_add(base, buf);
+ mm_free(buf);
+ if (r) return r;
+ }
+ return 0;
+}
+
+typedef DWORD(WINAPI *GetNetworkParams_fn_t)(FIXED_INFO *, DWORD*);
+
+/* Use the windows GetNetworkParams interface in iphlpapi.dll to */
+/* figure out what our nameservers are. */
+static int
+load_nameservers_with_getnetworkparams(struct evdns_base *base)
+{
+ /* Based on MSDN examples and inspection of c-ares code. */
+ FIXED_INFO *fixed;
+ HMODULE handle = 0;
+ ULONG size = sizeof(FIXED_INFO);
+ void *buf = NULL;
+ int status = 0, r, added_any;
+ IP_ADDR_STRING *ns;
+ GetNetworkParams_fn_t fn;
+
+ ASSERT_LOCKED(base);
+ if (!(handle = evutil_load_windows_system_library_(
+ TEXT("iphlpapi.dll")))) {
+ log(EVDNS_LOG_WARN, "Could not open iphlpapi.dll");
+ status = -1;
+ goto done;
+ }
+ if (!(fn = (GetNetworkParams_fn_t) GetProcAddress(handle, "GetNetworkParams"))) {
+ log(EVDNS_LOG_WARN, "Could not get address of function.");
+ status = -1;
+ goto done;
+ }
+
+ buf = mm_malloc(size);
+ if (!buf) { status = 4; goto done; }
+ fixed = buf;
+ r = fn(fixed, &size);
+ if (r != ERROR_SUCCESS && r != ERROR_BUFFER_OVERFLOW) {
+ status = -1;
+ goto done;
+ }
+ if (r != ERROR_SUCCESS) {
+ mm_free(buf);
+ buf = mm_malloc(size);
+ if (!buf) { status = 4; goto done; }
+ fixed = buf;
+ r = fn(fixed, &size);
+ if (r != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG, "fn() failed.");
+ status = -1;
+ goto done;
+ }
+ }
+
+ EVUTIL_ASSERT(fixed);
+ added_any = 0;
+ ns = &(fixed->DnsServerList);
+ while (ns) {
+ r = evdns_nameserver_ip_add_line(base, ns->IpAddress.String);
+ if (r) {
+			log(EVDNS_LOG_DEBUG,"Could not add nameserver %s to list, error: %d",
+ (ns->IpAddress.String),(int)GetLastError());
+ status = r;
+ } else {
+ ++added_any;
+ log(EVDNS_LOG_DEBUG,"Successfully added %s as nameserver",ns->IpAddress.String);
+ }
+
+ ns = ns->Next;
+ }
+
+ if (!added_any) {
+ log(EVDNS_LOG_DEBUG, "No nameservers added.");
+ if (status == 0)
+ status = -1;
+ } else {
+ status = 0;
+ }
+
+ done:
+ if (buf)
+ mm_free(buf);
+ if (handle)
+ FreeLibrary(handle);
+ return status;
+}
+
+static int
+config_nameserver_from_reg_key(struct evdns_base *base, HKEY key, const TCHAR *subkey)
+{
+ char *buf;
+ DWORD bufsz = 0, type = 0;
+ int status = 0;
+
+ ASSERT_LOCKED(base);
+ if (RegQueryValueEx(key, subkey, 0, &type, NULL, &bufsz)
+ != ERROR_MORE_DATA)
+ return -1;
+ if (!(buf = mm_malloc(bufsz)))
+ return -1;
+
+ if (RegQueryValueEx(key, subkey, 0, &type, (LPBYTE)buf, &bufsz)
+ == ERROR_SUCCESS && bufsz > 1) {
+ status = evdns_nameserver_ip_add_line(base,buf);
+ }
+
+ mm_free(buf);
+ return status;
+}
+
+#define SERVICES_KEY TEXT("System\\CurrentControlSet\\Services\\")
+#define WIN_NS_9X_KEY SERVICES_KEY TEXT("VxD\\MSTCP")
+#define WIN_NS_NT_KEY SERVICES_KEY TEXT("Tcpip\\Parameters")
+
+static int
+load_nameservers_from_registry(struct evdns_base *base)
+{
+ int found = 0;
+ int r;
+#define TRY(k, name) \
+ if (!found && config_nameserver_from_reg_key(base,k,TEXT(name)) == 0) { \
+ log(EVDNS_LOG_DEBUG,"Found nameservers in %s/%s",#k,name); \
+ found = 1; \
+ } else if (!found) { \
+ log(EVDNS_LOG_DEBUG,"Didn't find nameservers in %s/%s", \
+ #k,#name); \
+ }
+
+ ASSERT_LOCKED(base);
+
+ if (((int)GetVersion()) > 0) { /* NT */
+ HKEY nt_key = 0, interfaces_key = 0;
+
+ if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0,
+ KEY_READ, &nt_key) != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG,"Couldn't open nt key, %d",(int)GetLastError());
+ return -1;
+ }
+ r = RegOpenKeyEx(nt_key, TEXT("Interfaces"), 0,
+ KEY_QUERY_VALUE|KEY_ENUMERATE_SUB_KEYS,
+ &interfaces_key);
+ if (r != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG,"Couldn't open interfaces key, %d",(int)GetLastError());
+ return -1;
+ }
+ TRY(nt_key, "NameServer");
+ TRY(nt_key, "DhcpNameServer");
+ TRY(interfaces_key, "NameServer");
+ TRY(interfaces_key, "DhcpNameServer");
+ RegCloseKey(interfaces_key);
+ RegCloseKey(nt_key);
+ } else {
+ HKEY win_key = 0;
+ if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_9X_KEY, 0,
+ KEY_READ, &win_key) != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG, "Couldn't open registry key, %d", (int)GetLastError());
+ return -1;
+ }
+ TRY(win_key, "NameServer");
+ RegCloseKey(win_key);
+ }
+
+ if (found == 0) {
+ log(EVDNS_LOG_WARN,"Didn't find any nameservers.");
+ }
+
+ return found ? 0 : -1;
+#undef TRY
+}
+
+int
+evdns_base_config_windows_nameservers(struct evdns_base *base)
+{
+ int r;
+ char *fname;
+ if (base == NULL)
+ base = current_base;
+ if (base == NULL)
+ return -1;
+ EVDNS_LOCK(base);
+ fname = evdns_get_default_hosts_filename();
+ log(EVDNS_LOG_DEBUG, "Loading hosts entries from %s", fname);
+ evdns_base_load_hosts(base, fname);
+ if (fname)
+ mm_free(fname);
+
+ if (load_nameservers_with_getnetworkparams(base) == 0) {
+ EVDNS_UNLOCK(base);
+ return 0;
+ }
+ r = load_nameservers_from_registry(base);
+
+ EVDNS_UNLOCK(base);
+ return r;
+}
+
+int
+evdns_config_windows_nameservers(void)
+{
+ if (!current_base) {
+ current_base = evdns_base_new(NULL, 1);
+ return current_base == NULL ? -1 : 0;
+ } else {
+ return evdns_base_config_windows_nameservers(current_base);
+ }
+}
+#endif
+
+struct evdns_base *
+evdns_base_new(struct event_base *event_base, int flags)
+{
+ struct evdns_base *base;
+
+ if (evutil_secure_rng_init() < 0) {
+ log(EVDNS_LOG_WARN, "Unable to seed random number generator; "
+ "DNS can't run.");
+ return NULL;
+ }
+
+ /* Give the evutil library a hook into its evdns-enabled
+ * functionality. We can't just call evdns_getaddrinfo directly or
+ * else libevent-core will depend on libevent-extras. */
+ evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo);
+
+ base = mm_malloc(sizeof(struct evdns_base));
+ if (base == NULL)
+ return (NULL);
+ memset(base, 0, sizeof(struct evdns_base));
+ base->req_waiting_head = NULL;
+
+ EVTHREAD_ALLOC_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVDNS_LOCK(base);
+
+ /* Set max requests inflight and allocate req_heads. */
+ base->req_heads = NULL;
+
+ evdns_base_set_max_requests_inflight(base, 64);
+
+ base->server_head = NULL;
+ base->event_base = event_base;
+ base->global_good_nameservers = base->global_requests_inflight =
+ base->global_requests_waiting = 0;
+
+ base->global_timeout.tv_sec = 5;
+ base->global_timeout.tv_usec = 0;
+ base->global_max_reissues = 1;
+ base->global_max_retransmits = 3;
+ base->global_max_nameserver_timeout = 3;
+ base->global_search_state = NULL;
+ base->global_randomize_case = 1;
+ base->global_getaddrinfo_allow_skew.tv_sec = 3;
+ base->global_getaddrinfo_allow_skew.tv_usec = 0;
+ base->global_nameserver_probe_initial_timeout.tv_sec = 10;
+ base->global_nameserver_probe_initial_timeout.tv_usec = 0;
+
+ TAILQ_INIT(&base->hostsdb);
+
+#define EVDNS_BASE_ALL_FLAGS (0x8001)
+ if (flags & ~EVDNS_BASE_ALL_FLAGS) {
+ flags = EVDNS_BASE_INITIALIZE_NAMESERVERS;
+ log(EVDNS_LOG_WARN,
+ "Unrecognized flag passed to evdns_base_new(). Assuming "
+ "you meant EVDNS_BASE_INITIALIZE_NAMESERVERS.");
+ }
+#undef EVDNS_BASE_ALL_FLAGS
+
+ if (flags & EVDNS_BASE_INITIALIZE_NAMESERVERS) {
+ int r;
+#ifdef _WIN32
+ r = evdns_base_config_windows_nameservers(base);
+#else
+ r = evdns_base_resolv_conf_parse(base, DNS_OPTIONS_ALL, "/etc/resolv.conf");
+#endif
+ if (r == -1) {
+ evdns_base_free_and_unlock(base, 0);
+ return NULL;
+ }
+ }
+ if (flags & EVDNS_BASE_DISABLE_WHEN_INACTIVE) {
+ base->disable_when_inactive = 1;
+ }
+
+ EVDNS_UNLOCK(base);
+ return base;
+}
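
Aside (editor's sketch, not part of this patch): a minimal lifecycle for the constructor above, using the two flags it recognizes and the matching free function defined below. Error handling is deliberately terse.

#include <event2/event.h>
#include <event2/dns.h>

int main(void)
{
	struct event_base *base = event_base_new();
	struct evdns_base *dns;

	if (!base)
		return 1;
	dns = evdns_base_new(base,
	    EVDNS_BASE_INITIALIZE_NAMESERVERS | EVDNS_BASE_DISABLE_WHEN_INACTIVE);
	if (!dns) {
		event_base_free(base);
		return 1;
	}
	/* ... schedule lookups, then run the loop ... */
	event_base_dispatch(base);

	evdns_base_free(dns, 1);   /* 1: fail outstanding requests with DNS_ERR_SHUTDOWN */
	event_base_free(base);
	return 0;
}
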
+
+int
+evdns_init(void)
+{
+ struct evdns_base *base = evdns_base_new(NULL, 1);
+ if (base) {
+ current_base = base;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+const char *
+evdns_err_to_string(int err)
+{
+ switch (err) {
+ case DNS_ERR_NONE: return "no error";
+ case DNS_ERR_FORMAT: return "misformatted query";
+ case DNS_ERR_SERVERFAILED: return "server failed";
+ case DNS_ERR_NOTEXIST: return "name does not exist";
+ case DNS_ERR_NOTIMPL: return "query not implemented";
+ case DNS_ERR_REFUSED: return "refused";
+
+ case DNS_ERR_TRUNCATED: return "reply truncated or ill-formed";
+ case DNS_ERR_UNKNOWN: return "unknown";
+ case DNS_ERR_TIMEOUT: return "request timed out";
+ case DNS_ERR_SHUTDOWN: return "dns subsystem shut down";
+ case DNS_ERR_CANCEL: return "dns request canceled";
+ case DNS_ERR_NODATA: return "no records in the reply";
+ default: return "[Unknown error code]";
+ }
+}
+
+static void
+evdns_nameserver_free(struct nameserver *server)
+{
+ if (server->socket >= 0)
+ evutil_closesocket(server->socket);
+ (void) event_del(&server->event);
+ event_debug_unassign(&server->event);
+ if (server->state == 0)
+ (void) event_del(&server->timeout_event);
+ if (server->probe_request) {
+ evdns_cancel_request(server->base, server->probe_request);
+ server->probe_request = NULL;
+ }
+ event_debug_unassign(&server->timeout_event);
+ mm_free(server);
+}
+
+static void
+evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests)
+{
+ struct nameserver *server, *server_next;
+ struct search_domain *dom, *dom_next;
+ int i;
+
+ /* Requires that we hold the lock. */
+
+ /* TODO(nickm) we might need to refcount here. */
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ while (base->req_heads[i]) {
+ if (fail_requests)
+ reply_schedule_callback(base->req_heads[i], 0, DNS_ERR_SHUTDOWN, NULL);
+ request_finished(base->req_heads[i], &REQ_HEAD(base, base->req_heads[i]->trans_id), 1);
+ }
+ }
+ while (base->req_waiting_head) {
+ if (fail_requests)
+ reply_schedule_callback(base->req_waiting_head, 0, DNS_ERR_SHUTDOWN, NULL);
+ request_finished(base->req_waiting_head, &base->req_waiting_head, 1);
+ }
+ base->global_requests_inflight = base->global_requests_waiting = 0;
+
+ for (server = base->server_head; server; server = server_next) {
+ server_next = server->next;
+ evdns_nameserver_free(server);
+ if (server_next == base->server_head)
+ break;
+ }
+ base->server_head = NULL;
+ base->global_good_nameservers = 0;
+
+ if (base->global_search_state) {
+ for (dom = base->global_search_state->head; dom; dom = dom_next) {
+ dom_next = dom->next;
+ mm_free(dom);
+ }
+ mm_free(base->global_search_state);
+ base->global_search_state = NULL;
+ }
+
+ {
+ struct hosts_entry *victim;
+ while ((victim = TAILQ_FIRST(&base->hostsdb))) {
+ TAILQ_REMOVE(&base->hostsdb, victim, next);
+ mm_free(victim);
+ }
+ }
+
+ mm_free(base->req_heads);
+
+ EVDNS_UNLOCK(base);
+ EVTHREAD_FREE_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ mm_free(base);
+}
+
+void
+evdns_base_free(struct evdns_base *base, int fail_requests)
+{
+ EVDNS_LOCK(base);
+ evdns_base_free_and_unlock(base, fail_requests);
+}
+
+void
+evdns_base_clear_host_addresses(struct evdns_base *base)
+{
+ struct hosts_entry *victim;
+ EVDNS_LOCK(base);
+ while ((victim = TAILQ_FIRST(&base->hostsdb))) {
+ TAILQ_REMOVE(&base->hostsdb, victim, next);
+ mm_free(victim);
+ }
+ EVDNS_UNLOCK(base);
+}
+
+void
+evdns_shutdown(int fail_requests)
+{
+ if (current_base) {
+ struct evdns_base *b = current_base;
+ current_base = NULL;
+ evdns_base_free(b, fail_requests);
+ }
+ evdns_log_fn = NULL;
+}
+
+static int
+evdns_base_parse_hosts_line(struct evdns_base *base, char *line)
+{
+ char *strtok_state;
+ static const char *const delims = " \t";
+ char *const addr = strtok_r(line, delims, &strtok_state);
+ char *hostname, *hash;
+ struct sockaddr_storage ss;
+ int socklen = sizeof(ss);
+ ASSERT_LOCKED(base);
+
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+ if (!addr || *addr == '#')
+ return 0;
+
+ memset(&ss, 0, sizeof(ss));
+ if (evutil_parse_sockaddr_port(addr, (struct sockaddr*)&ss, &socklen)<0)
+ return -1;
+ if (socklen > (int)sizeof(struct sockaddr_in6))
+ return -1;
+
+ if (sockaddr_getport((struct sockaddr*)&ss))
+ return -1;
+
+ while ((hostname = NEXT_TOKEN)) {
+ struct hosts_entry *he;
+ size_t namelen;
+ if ((hash = strchr(hostname, '#'))) {
+ if (hash == hostname)
+ return 0;
+ *hash = '\0';
+ }
+
+ namelen = strlen(hostname);
+
+ he = mm_calloc(1, sizeof(struct hosts_entry)+namelen);
+ if (!he)
+ return -1;
+ EVUTIL_ASSERT(socklen <= (int)sizeof(he->addr));
+ memcpy(&he->addr, &ss, socklen);
+ memcpy(he->hostname, hostname, namelen+1);
+ he->addrlen = socklen;
+
+ TAILQ_INSERT_TAIL(&base->hostsdb, he, next);
+
+ if (hash)
+ return 0;
+ }
+
+ return 0;
+#undef NEXT_TOKEN
+}
+
+static int
+evdns_base_load_hosts_impl(struct evdns_base *base, const char *hosts_fname)
+{
+ char *str=NULL, *cp, *eol;
+ size_t len;
+ int err=0;
+
+ ASSERT_LOCKED(base);
+
+ if (hosts_fname == NULL ||
+ (err = evutil_read_file_(hosts_fname, &str, &len, 0)) < 0) {
+ char tmp[64];
+ strlcpy(tmp, "127.0.0.1 localhost", sizeof(tmp));
+ evdns_base_parse_hosts_line(base, tmp);
+ strlcpy(tmp, "::1 localhost", sizeof(tmp));
+ evdns_base_parse_hosts_line(base, tmp);
+ return err ? -1 : 0;
+ }
+
+ /* This will break early if there is a NUL in the hosts file.
+	 * Probably not a problem. */
+ cp = str;
+ for (;;) {
+ eol = strchr(cp, '\n');
+
+ if (eol) {
+ *eol = '\0';
+ evdns_base_parse_hosts_line(base, cp);
+ cp = eol+1;
+ } else {
+ evdns_base_parse_hosts_line(base, cp);
+ break;
+ }
+ }
+
+ mm_free(str);
+ return 0;
+}
+
+int
+evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname)
+{
+ int res;
+ if (!base)
+ base = current_base;
+ EVDNS_LOCK(base);
+ res = evdns_base_load_hosts_impl(base, hosts_fname);
+ EVDNS_UNLOCK(base);
+ return res;
+}
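
Aside (editor's sketch, not part of this patch): reloading host entries with the two public helpers above. As evdns_base_load_hosts_impl() shows, a NULL or unreadable path falls back to the built-in localhost entries.

#include <stdio.h>
#include <event2/dns.h>

static void reload_hosts(struct evdns_base *dns, const char *path)
{
	evdns_base_clear_host_addresses(dns);   /* drop previously loaded entries */
	if (evdns_base_load_hosts(dns, path) < 0)
		fprintf(stderr, "could not read %s; using localhost fallback\n",
		    path ? path : "(default)");
}
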
+
+/* A single request for a getaddrinfo, either v4 or v6. */
+struct getaddrinfo_subrequest {
+ struct evdns_request *r;
+ ev_uint32_t type;
+};
+
+/* State data used to implement an in-progress getaddrinfo. */
+struct evdns_getaddrinfo_request {
+ struct evdns_base *evdns_base;
+ /* Copy of the modified 'hints' data that we'll use to build
+ * answers. */
+ struct evutil_addrinfo hints;
+ /* The callback to invoke when we're done */
+ evdns_getaddrinfo_cb user_cb;
+ /* User-supplied data to give to the callback. */
+ void *user_data;
+ /* The port to use when building sockaddrs. */
+ ev_uint16_t port;
+ /* The sub_request for an A record (if any) */
+ struct getaddrinfo_subrequest ipv4_request;
+ /* The sub_request for an AAAA record (if any) */
+ struct getaddrinfo_subrequest ipv6_request;
+
+ /* The cname result that we were told (if any) */
+ char *cname_result;
+
+ /* If we have one request answered and one request still inflight,
+ * then this field holds the answer from the first request... */
+ struct evutil_addrinfo *pending_result;
+ /* And this event is a timeout that will tell us to cancel the second
+ * request if it's taking a long time. */
+ struct event timeout;
+
+ /* And this field holds the error code from the first request... */
+ int pending_error;
+ /* If this is set, the user canceled this request. */
+ unsigned user_canceled : 1;
+ /* If this is set, the user can no longer cancel this request; we're
+ * just waiting for the free. */
+ unsigned request_done : 1;
+};
+
+/* Convert an evdns error to the equivalent getaddrinfo error. */
+static int
+evdns_err_to_getaddrinfo_err(int e1)
+{
+ /* XXX Do this better! */
+ if (e1 == DNS_ERR_NONE)
+ return 0;
+ else if (e1 == DNS_ERR_NOTEXIST)
+ return EVUTIL_EAI_NONAME;
+ else
+ return EVUTIL_EAI_FAIL;
+}
+
+/* Return the more informative of two getaddrinfo errors. */
+static int
+getaddrinfo_merge_err(int e1, int e2)
+{
+ /* XXXX be cleverer here. */
+ if (e1 == 0)
+ return e2;
+ else
+ return e1;
+}
+
+static void
+free_getaddrinfo_request(struct evdns_getaddrinfo_request *data)
+{
+ /* DO NOT CALL this if either of the requests is pending. Only once
+ * both callbacks have been invoked is it safe to free the request */
+ if (data->pending_result)
+ evutil_freeaddrinfo(data->pending_result);
+ if (data->cname_result)
+ mm_free(data->cname_result);
+ event_del(&data->timeout);
+ mm_free(data);
+ return;
+}
+
+static void
+add_cname_to_reply(struct evdns_getaddrinfo_request *data,
+ struct evutil_addrinfo *ai)
+{
+ if (data->cname_result && ai) {
+ ai->ai_canonname = data->cname_result;
+ data->cname_result = NULL;
+ }
+}
+
+/* Callback: invoked when one request in a mixed-format A/AAAA getaddrinfo
+ * request has finished, but the other one took too long to answer. Pass
+ * along the answer we got, and cancel the other request.
+ */
+static void
+evdns_getaddrinfo_timeout_cb(evutil_socket_t fd, short what, void *ptr)
+{
+ int v4_timedout = 0, v6_timedout = 0;
+ struct evdns_getaddrinfo_request *data = ptr;
+
+ /* Cancel any pending requests, and note which one */
+ if (data->ipv4_request.r) {
+ /* XXXX This does nothing if the request's callback is already
+ * running (pending_cb is set). */
+ evdns_cancel_request(NULL, data->ipv4_request.r);
+ v4_timedout = 1;
+ EVDNS_LOCK(data->evdns_base);
+ ++data->evdns_base->getaddrinfo_ipv4_timeouts;
+ EVDNS_UNLOCK(data->evdns_base);
+ }
+ if (data->ipv6_request.r) {
+ /* XXXX This does nothing if the request's callback is already
+ * running (pending_cb is set). */
+ evdns_cancel_request(NULL, data->ipv6_request.r);
+ v6_timedout = 1;
+ EVDNS_LOCK(data->evdns_base);
+ ++data->evdns_base->getaddrinfo_ipv6_timeouts;
+ EVDNS_UNLOCK(data->evdns_base);
+ }
+
+ /* We only use this timeout callback when we have an answer for
+ * one address. */
+ EVUTIL_ASSERT(!v4_timedout || !v6_timedout);
+
+ /* Report the outcome of the other request that didn't time out. */
+ if (data->pending_result) {
+ add_cname_to_reply(data, data->pending_result);
+ data->user_cb(0, data->pending_result, data->user_data);
+ data->pending_result = NULL;
+ } else {
+ int e = data->pending_error;
+ if (!e)
+ e = EVUTIL_EAI_AGAIN;
+ data->user_cb(e, NULL, data->user_data);
+ }
+
+ data->user_cb = NULL; /* prevent double-call if evdns callbacks are
+ * in-progress. XXXX It would be better if this
+ * weren't necessary. */
+
+ if (!v4_timedout && !v6_timedout) {
+ /* should be impossible? XXXX */
+ free_getaddrinfo_request(data);
+ }
+}
+
+static int
+evdns_getaddrinfo_set_timeout(struct evdns_base *evdns_base,
+ struct evdns_getaddrinfo_request *data)
+{
+ return event_add(&data->timeout, &evdns_base->global_getaddrinfo_allow_skew);
+}
+
+static inline int
+evdns_result_is_answer(int result)
+{
+ return (result != DNS_ERR_NOTIMPL && result != DNS_ERR_REFUSED &&
+ result != DNS_ERR_SERVERFAILED && result != DNS_ERR_CANCEL);
+}
+
+static void
+evdns_getaddrinfo_gotresolve(int result, char type, int count,
+ int ttl, void *addresses, void *arg)
+{
+ int i;
+ struct getaddrinfo_subrequest *req = arg;
+ struct getaddrinfo_subrequest *other_req;
+ struct evdns_getaddrinfo_request *data;
+
+ struct evutil_addrinfo *res;
+
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr *sa;
+ int socklen, addrlen;
+ void *addrp;
+ int err;
+ int user_canceled;
+
+ EVUTIL_ASSERT(req->type == DNS_IPv4_A || req->type == DNS_IPv6_AAAA);
+ if (req->type == DNS_IPv4_A) {
+ data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv4_request);
+ other_req = &data->ipv6_request;
+ } else {
+ data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv6_request);
+ other_req = &data->ipv4_request;
+ }
+
+ /** Called from evdns_base_free() with @fail_requests == 1 */
+ if (result != DNS_ERR_SHUTDOWN) {
+ EVDNS_LOCK(data->evdns_base);
+ if (evdns_result_is_answer(result)) {
+ if (req->type == DNS_IPv4_A)
+ ++data->evdns_base->getaddrinfo_ipv4_answered;
+ else
+ ++data->evdns_base->getaddrinfo_ipv6_answered;
+ }
+ user_canceled = data->user_canceled;
+ if (other_req->r == NULL)
+ data->request_done = 1;
+ EVDNS_UNLOCK(data->evdns_base);
+ } else {
+ data->evdns_base = NULL;
+ user_canceled = data->user_canceled;
+ }
+
+ req->r = NULL;
+
+ if (result == DNS_ERR_CANCEL && ! user_canceled) {
+ /* Internal cancel request from timeout or internal error.
+		 * We already answered the user. */
+ if (other_req->r == NULL)
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ if (data->user_cb == NULL) {
+ /* We already answered. XXXX This shouldn't be needed; see
+ * comments in evdns_getaddrinfo_timeout_cb */
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ if (result == DNS_ERR_NONE) {
+ if (count == 0)
+ err = EVUTIL_EAI_NODATA;
+ else
+ err = 0;
+ } else {
+ err = evdns_err_to_getaddrinfo_err(result);
+ }
+
+ if (err) {
+ /* Looks like we got an error. */
+ if (other_req->r) {
+ /* The other request is still working; maybe it will
+ * succeed. */
+ /* XXXX handle failure from set_timeout */
+ if (result != DNS_ERR_SHUTDOWN) {
+ evdns_getaddrinfo_set_timeout(data->evdns_base, data);
+ }
+ data->pending_error = err;
+ return;
+ }
+
+ if (user_canceled) {
+ data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
+ } else if (data->pending_result) {
+ /* If we have an answer waiting, and we weren't
+ * canceled, ignore this error. */
+ add_cname_to_reply(data, data->pending_result);
+ data->user_cb(0, data->pending_result, data->user_data);
+ data->pending_result = NULL;
+ } else {
+ if (data->pending_error)
+ err = getaddrinfo_merge_err(err,
+ data->pending_error);
+ data->user_cb(err, NULL, data->user_data);
+ }
+ free_getaddrinfo_request(data);
+ return;
+ } else if (user_canceled) {
+ if (other_req->r) {
+			/* The other request is still working; let it hit this
+			 * callback with EVUTIL_EAI_CANCEL and report the
+			 * failure. */
+ return;
+ }
+ data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ /* Looks like we got some answers. We should turn them into addrinfos
+ * and then either queue those or return them all. */
+ EVUTIL_ASSERT(type == DNS_IPv4_A || type == DNS_IPv6_AAAA);
+
+ if (type == DNS_IPv4_A) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(data->port);
+
+ sa = (struct sockaddr *)&sin;
+ socklen = sizeof(sin);
+ addrlen = 4;
+ addrp = &sin.sin_addr.s_addr;
+ } else {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(data->port);
+
+ sa = (struct sockaddr *)&sin6;
+ socklen = sizeof(sin6);
+ addrlen = 16;
+ addrp = &sin6.sin6_addr.s6_addr;
+ }
+
+ res = NULL;
+ for (i=0; i < count; ++i) {
+ struct evutil_addrinfo *ai;
+ memcpy(addrp, ((char*)addresses)+i*addrlen, addrlen);
+ ai = evutil_new_addrinfo_(sa, socklen, &data->hints);
+ if (!ai) {
+ if (other_req->r) {
+ evdns_cancel_request(NULL, other_req->r);
+ }
+ data->user_cb(EVUTIL_EAI_MEMORY, NULL, data->user_data);
+ if (res)
+ evutil_freeaddrinfo(res);
+
+ if (other_req->r == NULL)
+ free_getaddrinfo_request(data);
+ return;
+ }
+ res = evutil_addrinfo_append_(res, ai);
+ }
+
+ if (other_req->r) {
+ /* The other request is still in progress; wait for it */
+ /* XXXX handle failure from set_timeout */
+ evdns_getaddrinfo_set_timeout(data->evdns_base, data);
+ data->pending_result = res;
+ return;
+ } else {
+ /* The other request is done or never started; append its
+ * results (if any) and return them. */
+ if (data->pending_result) {
+ if (req->type == DNS_IPv4_A)
+ res = evutil_addrinfo_append_(res,
+ data->pending_result);
+ else
+ res = evutil_addrinfo_append_(
+ data->pending_result, res);
+ data->pending_result = NULL;
+ }
+
+ /* Call the user callback. */
+ add_cname_to_reply(data, res);
+ data->user_cb(0, res, data->user_data);
+
+ /* Free data. */
+ free_getaddrinfo_request(data);
+ }
+}
+
+static struct hosts_entry *
+find_hosts_entry(struct evdns_base *base, const char *hostname,
+ struct hosts_entry *find_after)
+{
+ struct hosts_entry *e;
+
+ if (find_after)
+ e = TAILQ_NEXT(find_after, next);
+ else
+ e = TAILQ_FIRST(&base->hostsdb);
+
+ for (; e; e = TAILQ_NEXT(e, next)) {
+ if (!evutil_ascii_strcasecmp(e->hostname, hostname))
+ return e;
+ }
+ return NULL;
+}
+
+static int
+evdns_getaddrinfo_fromhosts(struct evdns_base *base,
+ const char *nodename, struct evutil_addrinfo *hints, ev_uint16_t port,
+ struct evutil_addrinfo **res)
+{
+ int n_found = 0;
+ struct hosts_entry *e;
+ struct evutil_addrinfo *ai=NULL;
+ int f = hints->ai_family;
+
+ EVDNS_LOCK(base);
+ for (e = find_hosts_entry(base, nodename, NULL); e;
+ e = find_hosts_entry(base, nodename, e)) {
+ struct evutil_addrinfo *ai_new;
+ ++n_found;
+ if ((e->addr.sa.sa_family == AF_INET && f == PF_INET6) ||
+ (e->addr.sa.sa_family == AF_INET6 && f == PF_INET))
+ continue;
+ ai_new = evutil_new_addrinfo_(&e->addr.sa, e->addrlen, hints);
+ if (!ai_new) {
+ n_found = 0;
+ goto out;
+ }
+ sockaddr_setport(ai_new->ai_addr, port);
+ ai = evutil_addrinfo_append_(ai, ai_new);
+ }
+out:
+	EVDNS_UNLOCK(base);
+ if (n_found) {
+ /* Note that we return an empty answer if we found entries for
+ * this hostname but none were of the right address type. */
+ *res = ai;
+ return 0;
+ } else {
+ if (ai)
+ evutil_freeaddrinfo(ai);
+ return -1;
+ }
+}
+
+struct evdns_getaddrinfo_request *
+evdns_getaddrinfo(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ evdns_getaddrinfo_cb cb, void *arg)
+{
+ struct evdns_getaddrinfo_request *data;
+ struct evutil_addrinfo hints;
+ struct evutil_addrinfo *res = NULL;
+ int err;
+ int port = 0;
+ int want_cname = 0;
+
+ if (!dns_base) {
+ dns_base = current_base;
+ if (!dns_base) {
+ log(EVDNS_LOG_WARN,
+ "Call to getaddrinfo_async with no "
+ "evdns_base configured.");
+ cb(EVUTIL_EAI_FAIL, NULL, arg); /* ??? better error? */
+ return NULL;
+ }
+ }
+
+ /* If we _must_ answer this immediately, do so. */
+ if ((hints_in && (hints_in->ai_flags & EVUTIL_AI_NUMERICHOST))) {
+ res = NULL;
+ err = evutil_getaddrinfo(nodename, servname, hints_in, &res);
+ cb(err, res, arg);
+ return NULL;
+ }
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+ evutil_adjust_hints_for_addrconfig_(&hints);
+
+ /* Now try to see if we _can_ answer immediately. */
+ /* (It would be nice to do this by calling getaddrinfo directly, with
+	 * AI_NUMERICHOST, on platforms that have it, but we can't: there isn't
+ * a reliable way to distinguish the "that wasn't a numeric host!" case
+ * from any other EAI_NONAME cases.) */
+ err = evutil_getaddrinfo_common_(nodename, servname, &hints, &res, &port);
+ if (err != EVUTIL_EAI_NEED_RESOLVE) {
+ cb(err, res, arg);
+ return NULL;
+ }
+
+ /* If there is an entry in the hosts file, we should give it now. */
+ if (!evdns_getaddrinfo_fromhosts(dns_base, nodename, &hints, port, &res)) {
+ cb(0, res, arg);
+ return NULL;
+ }
+
+ /* Okay, things are serious now. We're going to need to actually
+ * launch a request.
+ */
+ data = mm_calloc(1,sizeof(struct evdns_getaddrinfo_request));
+ if (!data) {
+ cb(EVUTIL_EAI_MEMORY, NULL, arg);
+ return NULL;
+ }
+
+ memcpy(&data->hints, &hints, sizeof(data->hints));
+ data->port = (ev_uint16_t)port;
+ data->ipv4_request.type = DNS_IPv4_A;
+ data->ipv6_request.type = DNS_IPv6_AAAA;
+ data->user_cb = cb;
+ data->user_data = arg;
+ data->evdns_base = dns_base;
+
+ want_cname = (hints.ai_flags & EVUTIL_AI_CANONNAME);
+
+ /* If we are asked for a PF_UNSPEC address, we launch two requests in
+ * parallel: one for an A address and one for an AAAA address. We
+ * can't send just one request, since many servers only answer one
+ * question per DNS request.
+ *
+ * Once we have the answer to one request, we allow for a short
+ * timeout before we report it, to see if the other one arrives. If
+ * they both show up in time, then we report both the answers.
+ *
+ * If too many addresses of one type time out or fail, we should stop
+ * launching those requests. (XXX we don't do that yet.)
+ */
+
+ if (hints.ai_family != PF_INET6) {
+ log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv4 as %p",
+ nodename, &data->ipv4_request);
+
+ data->ipv4_request.r = evdns_base_resolve_ipv4(dns_base,
+ nodename, 0, evdns_getaddrinfo_gotresolve,
+ &data->ipv4_request);
+ if (want_cname && data->ipv4_request.r)
+ data->ipv4_request.r->current_req->put_cname_in_ptr =
+ &data->cname_result;
+ }
+ if (hints.ai_family != PF_INET) {
+ log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv6 as %p",
+ nodename, &data->ipv6_request);
+
+ data->ipv6_request.r = evdns_base_resolve_ipv6(dns_base,
+ nodename, 0, evdns_getaddrinfo_gotresolve,
+ &data->ipv6_request);
+ if (want_cname && data->ipv6_request.r)
+ data->ipv6_request.r->current_req->put_cname_in_ptr =
+ &data->cname_result;
+ }
+
+ evtimer_assign(&data->timeout, dns_base->event_base,
+ evdns_getaddrinfo_timeout_cb, data);
+
+ if (data->ipv4_request.r || data->ipv6_request.r) {
+ return data;
+ } else {
+ mm_free(data);
+ cb(EVUTIL_EAI_FAIL, NULL, arg);
+ return NULL;
+ }
+}
+
+void
+evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *data)
+{
+ EVDNS_LOCK(data->evdns_base);
+ if (data->request_done) {
+ EVDNS_UNLOCK(data->evdns_base);
+ return;
+ }
+ event_del(&data->timeout);
+ data->user_canceled = 1;
+ if (data->ipv4_request.r)
+ evdns_cancel_request(data->evdns_base, data->ipv4_request.r);
+ if (data->ipv6_request.r)
+ evdns_cancel_request(data->evdns_base, data->ipv6_request.r);
+ EVDNS_UNLOCK(data->evdns_base);
+}
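
Aside (editor's sketch, not part of this patch): driving the asynchronous getaddrinfo pair above. The callback signature matches evdns_getaddrinfo_cb, the error codes are the EVUTIL_EAI_* values produced in this file, and the POSIX socket headers are an assumption of this sketch.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>      /* PF_UNSPEC, SOCK_STREAM (POSIX) */
#include <event2/dns.h>
#include <event2/util.h>

static void
on_resolved(int err, struct evutil_addrinfo *res, void *arg)
{
	struct evutil_addrinfo *ai;
	int n = 0;
	(void)arg;
	if (err) {
		fprintf(stderr, "resolve failed: %s\n", evutil_gai_strerror(err));
		return;
	}
	for (ai = res; ai; ai = ai->ai_next)
		++n;
	printf("got %d address(es)\n", n);
	evutil_freeaddrinfo(res);
}

static struct evdns_getaddrinfo_request *
start_lookup(struct evdns_base *dns, const char *host)
{
	struct evutil_addrinfo hints;
	memset(&hints, 0, sizeof(hints));
	hints.ai_family = PF_UNSPEC;      /* asks for the parallel A + AAAA path */
	hints.ai_socktype = SOCK_STREAM;
	/* A NULL return means the answer (or error) was already delivered
	 * synchronously; only a non-NULL handle may be passed to
	 * evdns_getaddrinfo_cancel(). */
	return evdns_getaddrinfo(dns, host, NULL, &hints, on_resolved, NULL);
}
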
diff --git a/libs/libevent/src/event-internal.h b/libs/libevent/src/event-internal.h
new file mode 100644
index 0000000000..66dcfc329c
--- /dev/null
+++ b/libs/libevent/src/event-internal.h
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT_INTERNAL_H_INCLUDED_
+#define EVENT_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <time.h>
+#include <sys/queue.h>
+#include "event2/event_struct.h"
+#include "minheap-internal.h"
+#include "evsignal-internal.h"
+#include "mm-internal.h"
+#include "defer-internal.h"
+
+/* map union members back */
+
+/* mutually exclusive */
+#define ev_signal_next ev_.ev_signal.ev_signal_next
+#define ev_io_next ev_.ev_io.ev_io_next
+#define ev_io_timeout ev_.ev_io.ev_timeout
+
+/* used only by signals */
+#define ev_ncalls ev_.ev_signal.ev_ncalls
+#define ev_pncalls ev_.ev_signal.ev_pncalls
+
+#define ev_pri ev_evcallback.evcb_pri
+#define ev_flags ev_evcallback.evcb_flags
+#define ev_closure ev_evcallback.evcb_closure
+#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
+#define ev_arg ev_evcallback.evcb_arg
+
+/** @name Event closure codes
+
+ Possible values for evcb_closure in struct event_callback
+
+ @{
+ */
+/** A regular event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT 0
+/** A signal event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT_SIGNAL 1
+/** A persistent non-signal event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT_PERSIST 2
+/** A simple callback. Uses the evcb_selfcb callback. */
+#define EV_CLOSURE_CB_SELF 3
+/** A finalizing callback. Uses the evcb_cbfinalize callback. */
+#define EV_CLOSURE_CB_FINALIZE 4
+/** A finalizing event. Uses the evcb_evfinalize callback. */
+#define EV_CLOSURE_EVENT_FINALIZE 5
+/** A finalizing event that should get freed after. Uses the evcb_evfinalize
+ * callback. */
+#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
+/** @} */
+
+/** Structure to define the backend of a given event_base. */
+struct eventop {
+ /** The name of this backend. */
+ const char *name;
+ /** Function to set up an event_base to use this backend. It should
+ * create a new structure holding whatever information is needed to
+ * run the backend, and return it. The returned pointer will get
+ * stored by event_init into the event_base.evbase field. On failure,
+ * this function should return NULL. */
+ void *(*init)(struct event_base *);
+ /** Enable reading/writing on a given fd or signal. 'events' will be
+ * the events that we're trying to enable: one or more of EV_READ,
+ * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
+ * were enabled on this fd previously. 'fdinfo' will be a structure
+ * associated with the fd by the evmap; its size is defined by the
+ * fdinfo field below. It will be set to 0 the first time the fd is
+ * added. The function should return 0 on success and -1 on error.
+ */
+ int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
+ /** As "add", except 'events' contains the events we mean to disable. */
+ int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
+ /** Function to implement the core of an event loop. It must see which
+ added events are ready, and cause event_active to be called for each
+ active event (usually via event_io_active or such). It should
+ return 0 on success and -1 on error.
+ */
+ int (*dispatch)(struct event_base *, struct timeval *);
+ /** Function to clean up and free our data from the event_base. */
+ void (*dealloc)(struct event_base *);
+ /** Flag: set if we need to reinitialize the event base after we fork.
+ */
+ int need_reinit;
+ /** Bit-array of supported event_method_features that this backend can
+ * provide. */
+ enum event_method_feature features;
+ /** Length of the extra information we should record for each fd that
+ has one or more active events. This information is recorded
+ as part of the evmap entry for each fd, and passed as an argument
+ to the add and del functions above.
+ */
+ size_t fdinfo_len;
+};
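
Aside (editor-supplied sketch, not part of this patch): a hypothetical do-nothing backend, just to make the contract above concrete. Real backends (epollops, kqops, ...) fill the same slots; a real dispatch would poll and activate ready events instead of returning immediately.

/* Illustration only -- a skeleton backend, not part of libevent. */
#include "event-internal.h"   /* struct eventop, struct event_base, mm_* */

struct noop_state { int unused; };

static void *
noop_init(struct event_base *base)
{
	(void)base;
	return mm_calloc(1, sizeof(struct noop_state)); /* becomes event_base.evbase */
}

static int
noop_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *fdinfo)
{
	(void)base; (void)fd; (void)old; (void)events; (void)fdinfo;
	return 0;       /* pretend the fd/signal was registered */
}

static int
noop_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *fdinfo)
{
	(void)base; (void)fd; (void)old; (void)events; (void)fdinfo;
	return 0;
}

static int
noop_dispatch(struct event_base *base, struct timeval *tv)
{
	(void)base; (void)tv;
	return 0;       /* a real backend would wait for readiness here */
}

static void
noop_dealloc(struct event_base *base)
{
	mm_free(base->evbase);
}

static const struct eventop noopops = {
	"noop",
	noop_init,
	noop_add,
	noop_del,
	noop_dispatch,
	noop_dealloc,
	0,	/* need_reinit */
	0,	/* features */
	0	/* fdinfo_len: no per-fd data needed */
};
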
+
+#ifdef _WIN32
+/* If we're on win32, then file descriptors are not nice low densely packed
+ integers. Instead, they are pointer-like windows handles, and we want to
+ use a hashtable instead of an array to map fds to events.
+*/
+#define EVMAP_USE_HT
+#endif
+
+/* #define HT_CACHE_HASH_VALS */
+
+#ifdef EVMAP_USE_HT
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+struct event_map_entry;
+HT_HEAD(event_io_map, event_map_entry);
+#else
+#define event_io_map event_signal_map
+#endif
+
+/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
+ defined, this structure is also used as event_io_map, which maps fds to a
+ list of events.
+*/
+struct event_signal_map {
+ /* An array of evmap_io * or of evmap_signal *; empty entries are
+ * set to NULL. */
+ void **entries;
+ /* The number of entries available in entries */
+ int nentries;
+};
+
+/* A list of events waiting on a given 'common' timeout value. Ordinarily,
+ * events waiting for a timeout wait on a minheap. Sometimes, however, a
+ * queue can be faster.
+ */
+struct common_timeout_list {
+ /* List of events currently waiting in the queue. */
+ struct event_list events;
+ /* 'magic' timeval used to indicate the duration of events in this
+ * queue. */
+ struct timeval duration;
+ /* Event that triggers whenever one of the events in the queue is
+ * ready to activate */
+ struct event timeout_event;
+ /* The event_base that this timeout list is part of */
+ struct event_base *base;
+};
+
+/** Mask used to get the real tv_usec value from a common timeout. */
+#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
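
Aside (editor's note, hedged): applications request a common timeout through event_base_init_common_timeout() in event2/event.h; the returned "magic" timeval carries flag bits in tv_usec, which masks like the one above strip off again. A minimal sketch:

#include <event2/event.h>

/* Sketch: put 'ev' on a shared 10-second timeout queue instead of the minheap. */
static void
add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval tv = { 10, 0 };
	const struct timeval *common = event_base_init_common_timeout(base, &tv);
	event_add(ev, common ? common : &tv);   /* fall back to an ordinary timeout */
}
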
+
+struct event_change;
+
+/* List of 'changes' since the last call to eventop.dispatch. Only maintained
+ * if the backend is using changesets. */
+struct event_changelist {
+ struct event_change *changes;
+ int n_changes;
+ int changes_size;
+};
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+/* Global internal flag: set to one if debug mode is on. */
+extern int event_debug_mode_on_;
+#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
+#else
+#define EVENT_DEBUG_MODE_IS_ON() (0)
+#endif
+
+TAILQ_HEAD(evcallback_list, event_callback);
+
+/* Sets up an event for processing once */
+struct event_once {
+ LIST_ENTRY(event_once) next_once;
+ struct event ev;
+
+ void (*cb)(evutil_socket_t, short, void *);
+ void *arg;
+};
+
+struct event_base {
+ /** Function pointers and other data to describe this event_base's
+ * backend. */
+ const struct eventop *evsel;
+ /** Pointer to backend-specific data. */
+ void *evbase;
+
+ /** List of changes to tell backend about at next dispatch. Only used
+ * by the O(1) backends. */
+ struct event_changelist changelist;
+
+ /** Function pointers used to describe the backend that this event_base
+ * uses for signals */
+ const struct eventop *evsigsel;
+	/** Data to implement the common signal handler code. */
+ struct evsig_info sig;
+
+ /** Number of virtual events */
+ int virtual_event_count;
+ /** Maximum number of virtual events active */
+ int virtual_event_count_max;
+ /** Number of total events added to this event_base */
+ int event_count;
+ /** Maximum number of total events added to this event_base */
+ int event_count_max;
+ /** Number of total events active in this event_base */
+ int event_count_active;
+ /** Maximum number of total events active in this event_base */
+ int event_count_active_max;
+
+ /** Set if we should terminate the loop once we're done processing
+ * events. */
+ int event_gotterm;
+ /** Set if we should terminate the loop immediately */
+ int event_break;
+ /** Set if we should start a new instance of the loop immediately. */
+ int event_continue;
+
+ /** The currently running priority of events */
+ int event_running_priority;
+
+ /** Set if we're running the event_base_loop function, to prevent
+ * reentrant invocation. */
+ int running_loop;
+
+ /** Set to the number of deferred_cbs we've made 'active' in the
+ * loop. This is a hack to prevent starvation; it would be smarter
+ * to just use event_config_set_max_dispatch_interval's max_callbacks
+ * feature */
+ int n_deferreds_queued;
+
+ /* Active event management. */
+ /** An array of nactivequeues queues for active event_callbacks (ones
+ * that have triggered, and whose callbacks need to be called). Low
+ * priority numbers are more important, and stall higher ones.
+ */
+ struct evcallback_list *activequeues;
+ /** The length of the activequeues array */
+ int nactivequeues;
+ /** A list of event_callbacks that should become active the next time
+ * we process events, but not this time. */
+ struct evcallback_list active_later_queue;
+
+ /* common timeout logic */
+
+ /** An array of common_timeout_list* for all of the common timeout
+ * values we know. */
+ struct common_timeout_list **common_timeout_queues;
+ /** The number of entries used in common_timeout_queues */
+ int n_common_timeouts;
+ /** The total size of common_timeout_queues. */
+ int n_common_timeouts_allocated;
+
+ /** Mapping from file descriptors to enabled (added) events */
+ struct event_io_map io;
+
+ /** Mapping from signal numbers to enabled (added) events. */
+ struct event_signal_map sigmap;
+
+ /** Priority queue of events with timeouts. */
+ struct min_heap timeheap;
+
+ /** Stored timeval: used to avoid calling gettimeofday/clock_gettime
+ * too often. */
+ struct timeval tv_cache;
+
+ struct evutil_monotonic_timer monotonic_timer;
+
+ /** Difference between internal time (maybe from clock_gettime) and
+ * gettimeofday. */
+ struct timeval tv_clock_diff;
+ /** Second in which we last updated tv_clock_diff, in monotonic time. */
+ time_t last_updated_clock_diff;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ /* threading support */
+ /** The thread currently running the event_loop for this base */
+ unsigned long th_owner_id;
+ /** A lock to prevent conflicting accesses to this event_base */
+ void *th_base_lock;
+ /** A condition that gets signalled when we're done processing an
+ * event with waiters on it. */
+ void *current_event_cond;
+ /** Number of threads blocking on current_event_cond. */
+ int current_event_waiters;
+#endif
+ /** The event whose callback is executing right now */
+ struct event_callback *current_event;
+
+#ifdef _WIN32
+ /** IOCP support structure, if IOCP is enabled. */
+ struct event_iocp_port *iocp;
+#endif
+
+ /** Flags that this base was configured with */
+ enum event_base_config_flag flags;
+
+ struct timeval max_dispatch_time;
+ int max_dispatch_callbacks;
+ int limit_callbacks_after_prio;
+
+	/* Notify the main thread to wake up, break out of the loop, etc. */
+ /** True if the base already has a pending notify, and we don't need
+ * to add any more. */
+ int is_notify_pending;
+ /** A socketpair used by some th_notify functions to wake up the main
+ * thread. */
+ evutil_socket_t th_notify_fd[2];
+ /** An event used by some th_notify functions to wake up the main
+ * thread. */
+ struct event th_notify;
+ /** A function used to wake up the main thread from another thread. */
+ int (*th_notify_fn)(struct event_base *base);
+
+ /** Saved seed for weak random number generator. Some backends use
+ * this to produce fairness among sockets. Protected by th_base_lock. */
+ struct evutil_weakrand_state weakrand_seed;
+
+ /** List of event_onces that have not yet fired. */
+ LIST_HEAD(once_event_list, event_once) once_events;
+
+};
+
+struct event_config_entry {
+ TAILQ_ENTRY(event_config_entry) next;
+
+ const char *avoid_method;
+};
+
+/** Internal structure: describes the configuration we want for an event_base
+ * that we're about to allocate. */
+struct event_config {
+ TAILQ_HEAD(event_configq, event_config_entry) entries;
+
+ int n_cpus_hint;
+ struct timeval max_dispatch_interval;
+ int max_dispatch_callbacks;
+ int limit_callbacks_after_prio;
+ enum event_method_feature require_features;
+ enum event_base_config_flag flags;
+};
+
+/* Internal use only: Functions that might be missing from <sys/queue.h> */
+#ifndef TAILQ_FIRST
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#endif
+#ifndef TAILQ_END
+#define TAILQ_END(head) NULL
+#endif
+#ifndef TAILQ_NEXT
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#endif
+
+#ifndef TAILQ_FOREACH
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+#endif
+
+#ifndef TAILQ_INSERT_BEFORE
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+#endif
+
+#define N_ACTIVE_CALLBACKS(base) \
+ ((base)->event_count_active)
+
+int evsig_set_handler_(struct event_base *base, int evsignal,
+ void (*fn)(int));
+int evsig_restore_handler_(struct event_base *base, int evsignal);
+
+int event_add_nolock_(struct event *ev,
+ const struct timeval *tv, int tv_is_absolute);
+/** Argument for event_del_nolock_. Tells event_del not to block on the event
+ * if it's running in another thread. */
+#define EVENT_DEL_NOBLOCK 0
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it's running in another thread, regardless of its value for EV_FINALIZE
+ */
+#define EVENT_DEL_BLOCK 1
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it is running in another thread and it doesn't have EV_FINALIZE set.
+ */
+#define EVENT_DEL_AUTOBLOCK 2
+/** Argument for event_del_nolock_. Tells event_del to proceed even if the
+ * event is set up for finalization rather than for regular use. */
+#define EVENT_DEL_EVEN_IF_FINALIZING 3
+int event_del_nolock_(struct event *ev, int blocking);
+int event_remove_timer_nolock_(struct event *ev);
+
+void event_active_nolock_(struct event *ev, int res, short count);
+int event_callback_activate_(struct event_base *, struct event_callback *);
+int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
+int event_callback_cancel_(struct event_base *base,
+ struct event_callback *evcb);
+
+void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));
+
+
+void event_active_later_(struct event *ev, int res);
+void event_active_later_nolock_(struct event *ev, int res);
+int event_callback_activate_later_nolock_(struct event_base *base,
+ struct event_callback *evcb);
+int event_callback_cancel_nolock_(struct event_base *base,
+ struct event_callback *evcb, int even_if_finalizing);
+void event_callback_init_(struct event_base *base,
+ struct event_callback *cb);
+
+/* FIXME document. */
+void event_base_add_virtual_(struct event_base *base);
+void event_base_del_virtual_(struct event_base *base);
+
+/** For debugging: unless assertions are disabled, verify the referential
+ integrity of the internal data structures of 'base'. This operation can
+ be expensive.
+
+ Returns on success; aborts on failure.
+*/
+void event_base_assert_ok_(struct event_base *base);
+void event_base_assert_ok_nolock_(struct event_base *base);
+
+
+/* Helper function: Call 'cb' exactly once for every inserted or active event
+ * in the event_base 'base'.
+ *
+ * If 'cb' returns 0, continue on to the next event. Otherwise, return the
+ * same value that 'cb' returned.
+ *
+ * Requires that 'base' be locked.
+ */
+int event_base_foreach_event_nolock_(struct event_base *base,
+ event_base_foreach_event_cb cb, void *arg);
+
+/* Cleanup function to reset debug mode during shutdown.
+ *
+ * Calling this function doesn't mean it'll be possible to re-enable
+ * debug mode if any events were added.
+ */
+void event_disable_debug_mode(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/event.c b/libs/libevent/src/event.c
new file mode 100644
index 0000000000..503003e249
--- /dev/null
+++ b/libs/libevent/src/event.c
@@ -0,0 +1,3940 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <ctype.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/event_compat.h"
+#include "event-internal.h"
+#include "defer-internal.h"
+#include "evthread-internal.h"
+#include "event2/thread.h"
+#include "event2/util.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "iocp-internal.h"
+#include "changelist-internal.h"
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+#include "util-internal.h"
+
+
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+#include "kqueue-internal.h"
+#endif
+
+#ifdef EVENT__HAVE_EVENT_PORTS
+extern const struct eventop evportops;
+#endif
+#ifdef EVENT__HAVE_SELECT
+extern const struct eventop selectops;
+#endif
+#ifdef EVENT__HAVE_POLL
+extern const struct eventop pollops;
+#endif
+#ifdef EVENT__HAVE_EPOLL
+extern const struct eventop epollops;
+#endif
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+extern const struct eventop kqops;
+#endif
+#ifdef EVENT__HAVE_DEVPOLL
+extern const struct eventop devpollops;
+#endif
+#ifdef _WIN32
+extern const struct eventop win32ops;
+#endif
+
+/* Array of backends in order of preference. */
+static const struct eventop *eventops[] = {
+#ifdef EVENT__HAVE_EVENT_PORTS
+ &evportops,
+#endif
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+ &kqops,
+#endif
+#ifdef EVENT__HAVE_EPOLL
+ &epollops,
+#endif
+#ifdef EVENT__HAVE_DEVPOLL
+ &devpollops,
+#endif
+#ifdef EVENT__HAVE_POLL
+ &pollops,
+#endif
+#ifdef EVENT__HAVE_SELECT
+ &selectops,
+#endif
+#ifdef _WIN32
+ &win32ops,
+#endif
+ NULL
+};
+
+/* Global state; deprecated */
+struct event_base *event_global_current_base_ = NULL;
+#define current_base event_global_current_base_
+
+/* Global state */
+
+static void *event_self_cbarg_ptr_ = NULL;
+
+/* Prototypes */
+static void event_queue_insert_active(struct event_base *, struct event_callback *);
+static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
+static void event_queue_insert_timeout(struct event_base *, struct event *);
+static void event_queue_insert_inserted(struct event_base *, struct event *);
+static void event_queue_remove_active(struct event_base *, struct event_callback *);
+static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
+static void event_queue_remove_timeout(struct event_base *, struct event *);
+static void event_queue_remove_inserted(struct event_base *, struct event *);
+static void event_queue_make_later_events_active(struct event_base *base);
+
+static int evthread_make_base_notifiable_nolock_(struct event_base *base);
+static int event_del_(struct event *ev, int blocking);
+
+#ifdef USE_REINSERT_TIMEOUT
+/* This code seems buggy; only turn it on if we find out what the trouble is. */
+static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
+#endif
+
+static int event_haveevents(struct event_base *);
+
+static int event_process_active(struct event_base *);
+
+static int timeout_next(struct event_base *, struct timeval **);
+static void timeout_process(struct event_base *);
+
+static inline void event_signal_closure(struct event_base *, struct event *ev);
+static inline void event_persist_closure(struct event_base *, struct event *ev);
+
+static int evthread_notify_base(struct event_base *base);
+
+static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
+ struct event *ev);
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+/* These functions implement a hashtable of which 'struct event *' structures
+ * have been set up or added. We don't want to trust the content of the struct
+ * event itself, since we're trying to work through cases where an event gets
+ * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
+ */
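+
+/* Illustrative sketch (not part of this file's logic): debug mode has to be
+ * switched on before any event or event_base exists, e.g.:
+ *
+ *     event_enable_debug_mode();
+ *     struct event_base *base = event_base_new();
+ */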
+
+struct event_debug_entry {
+ HT_ENTRY(event_debug_entry) node;
+ const struct event *ptr;
+ unsigned added : 1;
+};
+
+static inline unsigned
+hash_debug_entry(const struct event_debug_entry *e)
+{
+ /* We need to do this silliness to convince compilers that we
+ * honestly mean to cast e->ptr to an integer, and discard any
+ * part of it that doesn't fit in an unsigned.
+ */
+ unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
+ /* Our hashtable implementation is pretty sensitive to low bits,
+ * and every struct event is over 64 bytes in size, so we can
+ * just say >>6. */
+ return (u >> 6);
+}
+
+static inline int
+eq_debug_entry(const struct event_debug_entry *a,
+ const struct event_debug_entry *b)
+{
+ return a->ptr == b->ptr;
+}
+
+int event_debug_mode_on_ = 0;
+
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+/**
+ * @brief Debug-mode flag that is set whenever a structure that needs to be
+ * shared across threads is created (if thread support is enabled).
+ *
+ * When and if evthreads are initialized, this variable is checked; if it is
+ * set to something other than zero, the evthread setup functions were called
+ * out of order.
+ *
+ * See: "Locks and threading" in the documentation.
+ */
+int event_debug_created_threadable_ctx_ = 0;
+#endif
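+
+/* Illustrative sketch (not part of this file's logic): the threading setup has
+ * to happen before any threadable structure is created, e.g. with the pthreads
+ * helper from <event2/thread.h>:
+ *
+ *     evthread_use_pthreads();
+ *     struct event_base *base = event_base_new();
+ *
+ * Calling evthread_use_pthreads() only after event_base_new() is the
+ * "out of order" case that the flag above is meant to catch.
+ */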
+
+/* Set if it's too late to enable event_debug_mode. */
+static int event_debug_mode_too_late = 0;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *event_debug_map_lock_ = NULL;
+#endif
+static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
+ HT_INITIALIZER();
+
+HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry)
+HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
+
+/* Macro: record that ev is now setup (that is, ready for an add) */
+#define event_debug_note_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ dent = mm_malloc(sizeof(*dent)); \
+ if (!dent) \
+ event_err(1, \
+ "Out of memory in debugging code"); \
+ dent->ptr = (ev); \
+ dent->added = 0; \
+ HT_INSERT(event_debug_map, &global_debug_map, dent); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is no longer setup */
+#define event_debug_note_teardown_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
+ if (dent) \
+ mm_free(dent); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is now added */
+#define event_debug_note_add_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 1; \
+ } else { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s: noting an add on a non-setup event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT \
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is no longer added */
+#define event_debug_note_del_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s: noting a del on a non-setup event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT \
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
+#define event_debug_assert_is_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (!dent) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s called on a non-initialized event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT\
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ } while (0)
+/* Macro: assert that ev is not added (i.e., okay to tear down or set
+ * up again) */
+#define event_debug_assert_not_added_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent && dent->added) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s called on an already added event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT", " \
+ "flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ } while (0)
+#else
+#define event_debug_note_setup_(ev) \
+ ((void)0)
+#define event_debug_note_teardown_(ev) \
+ ((void)0)
+#define event_debug_note_add_(ev) \
+ ((void)0)
+#define event_debug_note_del_(ev) \
+ ((void)0)
+#define event_debug_assert_is_setup_(ev) \
+ ((void)0)
+#define event_debug_assert_not_added_(ev) \
+ ((void)0)
+#endif
+
+#define EVENT_BASE_ASSERT_LOCKED(base) \
+ EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
+
+/* How often (in seconds) do we check for changes in wall clock time relative
+ * to monotonic time? Set this to -1 for 'never.' */
+#define CLOCK_SYNC_INTERVAL 5
+
+/** Set 'tp' to the current time according to 'base'. We must hold the lock
+ * on 'base'. If there is a cached time, return it. Otherwise, use
+ * clock_gettime or gettimeofday as appropriate to find out the right time.
+ * Return 0 on success, -1 on failure.
+ */
+static int
+gettime(struct event_base *base, struct timeval *tp)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (base->tv_cache.tv_sec) {
+ *tp = base->tv_cache;
+ return (0);
+ }
+
+ if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
+ return -1;
+ }
+
+ if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
+ < tp->tv_sec) {
+ struct timeval tv;
+ evutil_gettimeofday(&tv,NULL);
+ evutil_timersub(&tv, tp, &base->tv_clock_diff);
+ base->last_updated_clock_diff = tp->tv_sec;
+ }
+
+ return 0;
+}
+
+int
+event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
+{
+ int r;
+ if (!base) {
+ base = current_base;
+ if (!current_base)
+ return evutil_gettimeofday(tv, NULL);
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->tv_cache.tv_sec == 0) {
+ r = evutil_gettimeofday(tv, NULL);
+ } else {
+ evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
+ r = 0;
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
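+
+/* Illustrative sketch (not part of this file's logic): inside a running
+ * callback the cached value makes this a cheap substitute for
+ * evutil_gettimeofday(); 'base' and 'arg' are placeholders:
+ *
+ *     static void cb(evutil_socket_t fd, short what, void *arg)
+ *     {
+ *             struct event_base *base = arg;
+ *             struct timeval tv;
+ *             event_base_gettimeofday_cached(base, &tv);
+ *     }
+ *
+ * Outside the loop, or with EVENT_BASE_FLAG_NO_CACHE_TIME set, the call falls
+ * back to the real clock, as the code above shows.
+ */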
+
+/** Make 'base' have no current cached time. */
+static inline void
+clear_time_cache(struct event_base *base)
+{
+ base->tv_cache.tv_sec = 0;
+}
+
+/** Replace the cached time in 'base' with the current time. */
+static inline void
+update_time_cache(struct event_base *base)
+{
+ base->tv_cache.tv_sec = 0;
+ if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
+ gettime(base, &base->tv_cache);
+}
+
+int
+event_base_update_cache_time(struct event_base *base)
+{
+
+ if (!base) {
+ base = current_base;
+ if (!current_base)
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->running_loop)
+ update_time_cache(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return 0;
+}
+
+static inline struct event *
+event_callback_to_event(struct event_callback *evcb)
+{
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
+ return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
+}
+
+static inline struct event_callback *
+event_to_event_callback(struct event *ev)
+{
+ return &ev->ev_evcallback;
+}
+
+struct event_base *
+event_init(void)
+{
+ struct event_base *base = event_base_new_with_config(NULL);
+
+ if (base == NULL) {
+ event_errx(1, "%s: Unable to construct event_base", __func__);
+ return NULL;
+ }
+
+ current_base = base;
+
+ return (base);
+}
+
+struct event_base *
+event_base_new(void)
+{
+ struct event_base *base = NULL;
+ struct event_config *cfg = event_config_new();
+ if (cfg) {
+ base = event_base_new_with_config(cfg);
+ event_config_free(cfg);
+ }
+ return base;
+}
+
+/** Return true iff 'method' is the name of a method that 'cfg' tells us to
+ * avoid. */
+static int
+event_config_is_avoided_method(const struct event_config *cfg,
+ const char *method)
+{
+ struct event_config_entry *entry;
+
+ TAILQ_FOREACH(entry, &cfg->entries, next) {
+ if (entry->avoid_method != NULL &&
+ strcmp(entry->avoid_method, method) == 0)
+ return (1);
+ }
+
+ return (0);
+}
+
+/** Return true iff 'method' is disabled according to the environment. */
+static int
+event_is_method_disabled(const char *name)
+{
+ char environment[64];
+ int i;
+
+ evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
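+	/* 8 == strlen("EVENT_NO"); uppercase only the method-name part. */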
+ for (i = 8; environment[i] != '\0'; ++i)
+ environment[i] = EVUTIL_TOUPPER_(environment[i]);
+ /* Note that evutil_getenv_() ignores the environment entirely if
+ * we're setuid */
+ return (evutil_getenv_(environment) != NULL);
+}
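+
+/* Illustrative sketch (not part of this file's logic): a backend can be
+ * skipped either from the environment ("EVENT_NO" + uppercased method name,
+ * e.g. EVENT_NOEPOLL=1) or programmatically:
+ *
+ *     struct event_config *cfg = event_config_new();
+ *     event_config_avoid_method(cfg, "select");
+ *     event_config_require_features(cfg, EV_FEATURE_O1);
+ *     struct event_base *base = event_base_new_with_config(cfg);
+ *     event_config_free(cfg);
+ */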
+
+int
+event_base_get_features(const struct event_base *base)
+{
+ return base->evsel->features;
+}
+
+void
+event_enable_debug_mode(void)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_)
+ event_errx(1, "%s was called twice!", __func__);
+ if (event_debug_mode_too_late)
+ event_errx(1, "%s must be called *before* creating any events "
+ "or event_bases",__func__);
+
+ event_debug_mode_on_ = 1;
+
+ HT_INIT(event_debug_map, &global_debug_map);
+#endif
+}
+
+void
+event_disable_debug_mode(void)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ struct event_debug_entry **ent, *victim;
+
+ EVLOCK_LOCK(event_debug_map_lock_, 0);
+ for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
+ victim = *ent;
+ ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
+ mm_free(victim);
+ }
+ HT_CLEAR(event_debug_map, &global_debug_map);
+ EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
+
+ event_debug_mode_on_ = 0;
+#endif
+}
+
+struct event_base *
+event_base_new_with_config(const struct event_config *cfg)
+{
+ int i;
+ struct event_base *base;
+ int should_check_environment;
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ event_debug_mode_too_late = 1;
+#endif
+
+ if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return NULL;
+ }
+
+ if (cfg)
+ base->flags = cfg->flags;
+
+ should_check_environment =
+ !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
+
+ {
+ struct timeval tmp;
+ int precise_time =
+ cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
+ int flags;
+ if (should_check_environment && !precise_time) {
+ precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
+ base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
+ }
+ flags = precise_time ? EV_MONOT_PRECISE : 0;
+ evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
+
+ gettime(base, &tmp);
+ }
+
+ min_heap_ctor_(&base->timeheap);
+
+ base->sig.ev_signal_pair[0] = -1;
+ base->sig.ev_signal_pair[1] = -1;
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+
+ TAILQ_INIT(&base->active_later_queue);
+
+ evmap_io_initmap_(&base->io);
+ evmap_signal_initmap_(&base->sigmap);
+ event_changelist_init_(&base->changelist);
+
+ base->evbase = NULL;
+
+ if (cfg) {
+ memcpy(&base->max_dispatch_time,
+ &cfg->max_dispatch_interval, sizeof(struct timeval));
+ base->limit_callbacks_after_prio =
+ cfg->limit_callbacks_after_prio;
+ } else {
+ base->max_dispatch_time.tv_sec = -1;
+ base->limit_callbacks_after_prio = 1;
+ }
+ if (cfg && cfg->max_dispatch_callbacks >= 0) {
+ base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
+ } else {
+ base->max_dispatch_callbacks = INT_MAX;
+ }
+ if (base->max_dispatch_callbacks == INT_MAX &&
+ base->max_dispatch_time.tv_sec == -1)
+ base->limit_callbacks_after_prio = INT_MAX;
+
+ for (i = 0; eventops[i] && !base->evbase; i++) {
+ if (cfg != NULL) {
+ /* determine if this backend should be avoided */
+ if (event_config_is_avoided_method(cfg,
+ eventops[i]->name))
+ continue;
+ if ((eventops[i]->features & cfg->require_features)
+ != cfg->require_features)
+ continue;
+ }
+
+ /* also obey the environment variables */
+ if (should_check_environment &&
+ event_is_method_disabled(eventops[i]->name))
+ continue;
+
+ base->evsel = eventops[i];
+
+ base->evbase = base->evsel->init(base);
+ }
+
+ if (base->evbase == NULL) {
+ event_warnx("%s: no event mechanism available",
+ __func__);
+ base->evsel = NULL;
+ event_base_free(base);
+ return NULL;
+ }
+
+ if (evutil_getenv_("EVENT_SHOW_METHOD"))
+ event_msgx("libevent using: %s", base->evsel->name);
+
+ /* allocate a single active event queue */
+ if (event_base_priority_init(base, 1) < 0) {
+ event_base_free(base);
+ return NULL;
+ }
+
+ /* prepare for threading */
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+ event_debug_created_threadable_ctx_ = 1;
+#endif
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (EVTHREAD_LOCKING_ENABLED() &&
+ (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
+ int r;
+ EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
+ EVTHREAD_ALLOC_COND(base->current_event_cond);
+ r = evthread_make_base_notifiable(base);
+ if (r<0) {
+ event_warnx("%s: Unable to make base notifiable.", __func__);
+ event_base_free(base);
+ return NULL;
+ }
+ }
+#endif
+
+#ifdef _WIN32
+ if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
+ event_base_start_iocp_(base, cfg->n_cpus_hint);
+#endif
+
+ return (base);
+}
+
+int
+event_base_start_iocp_(struct event_base *base, int n_cpus)
+{
+#ifdef _WIN32
+ if (base->iocp)
+ return 0;
+ base->iocp = event_iocp_port_launch_(n_cpus);
+ if (!base->iocp) {
+ event_warnx("%s: Couldn't launch IOCP", __func__);
+ return -1;
+ }
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+void
+event_base_stop_iocp_(struct event_base *base)
+{
+#ifdef _WIN32
+ int rv;
+
+ if (!base->iocp)
+ return;
+ rv = event_iocp_shutdown_(base->iocp, -1);
+ EVUTIL_ASSERT(rv >= 0);
+ base->iocp = NULL;
+#endif
+}
+
+static int
+event_base_cancel_single_callback_(struct event_base *base,
+ struct event_callback *evcb,
+ int run_finalizers)
+{
+ int result = 0;
+
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ struct event *ev = event_callback_to_event(evcb);
+ if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+ event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
+ result = 1;
+ }
+ } else {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_callback_cancel_nolock_(base, evcb, 1);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ result = 1;
+ }
+
+ if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
+ switch (evcb->evcb_closure) {
+ case EV_CLOSURE_EVENT_FINALIZE:
+ case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+ struct event *ev = event_callback_to_event(evcb);
+ ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
+ if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+ mm_free(ev);
+ break;
+ }
+ case EV_CLOSURE_CB_FINALIZE:
+ evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
+ break;
+ default:
+ break;
+ }
+ }
+ return result;
+}
+
+static int event_base_free_queues_(struct event_base *base, int run_finalizers)
+{
+ int deleted = 0, i;
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb, *next;
+ for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
+ next = TAILQ_NEXT(evcb, evcb_active_next);
+ deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+ evcb = next;
+ }
+ }
+
+ {
+ struct event_callback *evcb;
+ while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+ deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+ }
+ }
+
+ return deleted;
+}
+
+static void
+event_base_free_(struct event_base *base, int run_finalizers)
+{
+ int i, n_deleted=0;
+ struct event *ev;
+ /* XXXX grab the lock? If there is contention when one thread frees
+ * the base, then the contending thread will be very sad soon. */
+
+ /* event_base_free(NULL) is how to free the current_base if we
+ * made it with event_init and forgot to hold a reference to it. */
+ if (base == NULL && current_base)
+ base = current_base;
+ /* Don't actually free NULL. */
+ if (base == NULL) {
+ event_warnx("%s: no base to free", __func__);
+ return;
+ }
+ /* XXX(niels) - check for internal events first */
+
+#ifdef _WIN32
+ event_base_stop_iocp_(base);
+#endif
+
+	/* Close the thread-notification fds if we have them. */
+ if (base->th_notify_fd[0] != -1) {
+ event_del(&base->th_notify);
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
+ if (base->th_notify_fd[1] != -1)
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+ event_debug_unassign(&base->th_notify);
+ }
+
+ /* Delete all non-internal events. */
+ evmap_delete_all_(base);
+
+ while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
+ event_del(ev);
+ ++n_deleted;
+ }
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ event_del(&ctl->timeout_event); /* Internal; doesn't count */
+ event_debug_unassign(&ctl->timeout_event);
+ for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
+ struct event *next = TAILQ_NEXT(ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+ event_del(ev);
+ ++n_deleted;
+ }
+ ev = next;
+ }
+ mm_free(ctl);
+ }
+ if (base->common_timeout_queues)
+ mm_free(base->common_timeout_queues);
+
+ for (;;) {
+		/* A finalizer can register yet another finalizer from within
+		 * its callback, and if that new finalizer ends up in
+		 * active_later_queue it can be moved into activequeues, leaving
+		 * events in activequeues after this function returns, which is
+		 * not what we want (we even have an assertion for this).
+		 *
+		 * A simple case is a bufferevent with an underlying bufferevent
+		 * (i.e. filters).
+ */
+ int i = event_base_free_queues_(base, run_finalizers);
+ if (!i) {
+ break;
+ }
+ n_deleted += i;
+ }
+
+ if (n_deleted)
+ event_debug(("%s: %d events were still set in base",
+ __func__, n_deleted));
+
+ while (LIST_FIRST(&base->once_events)) {
+ struct event_once *eonce = LIST_FIRST(&base->once_events);
+ LIST_REMOVE(eonce, next_once);
+ mm_free(eonce);
+ }
+
+ if (base->evsel != NULL && base->evsel->dealloc != NULL)
+ base->evsel->dealloc(base);
+
+ for (i = 0; i < base->nactivequeues; ++i)
+ EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
+
+ EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
+ min_heap_dtor_(&base->timeheap);
+
+ mm_free(base->activequeues);
+
+ evmap_io_clear_(&base->io);
+ evmap_signal_clear_(&base->sigmap);
+ event_changelist_freemem_(&base->changelist);
+
+ EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
+ EVTHREAD_FREE_COND(base->current_event_cond);
+
+ /* If we're freeing current_base, there won't be a current_base. */
+ if (base == current_base)
+ current_base = NULL;
+ mm_free(base);
+}
+
+void
+event_base_free_nofinalize(struct event_base *base)
+{
+ event_base_free_(base, 0);
+}
+
+void
+event_base_free(struct event_base *base)
+{
+ event_base_free_(base, 1);
+}
+
+/* Fake eventop; used to disable the backend temporarily inside event_reinit
+ * so that we can call event_del() on an event without telling the backend.
+ */
+static int
+nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
+ short events, void *fdinfo)
+{
+ return 0;
+}
+const struct eventop nil_eventop = {
+ "nil",
+ NULL, /* init: unused. */
+ NULL, /* add: unused. */
+ nil_backend_del, /* del: used, so needs to be killed. */
+ NULL, /* dispatch: unused. */
+ NULL, /* dealloc: unused. */
+ 0, 0, 0
+};
+
+/* reinitialize the event base after a fork */
+int
+event_reinit(struct event_base *base)
+{
+ const struct eventop *evsel;
+ int res = 0;
+ int was_notifiable = 0;
+ int had_signal_added = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ evsel = base->evsel;
+
+ /* check if this event mechanism requires reinit on the backend */
+ if (evsel->need_reinit) {
+ /* We're going to call event_del() on our notify events (the
+ * ones that tell about signals and wakeup events). But we
+ * don't actually want to tell the backend to change its
+ * state, since it might still share some resource (a kqueue,
+ * an epoll fd) with the parent process, and we don't want to
+		 * delete the fds from _that_ backend, so we temporarily stub out
+ * the evsel with a replacement.
+ */
+ base->evsel = &nil_eventop;
+ }
+
+ /* We need to re-create a new signal-notification fd and a new
+ * thread-notification fd. Otherwise, we'll still share those with
+ * the parent process, which would make any notification sent to them
+ * get received by one or both of the event loops, more or less at
+ * random.
+ */
+ if (base->sig.ev_signal_added) {
+ event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
+ event_debug_unassign(&base->sig.ev_signal);
+ memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
+ had_signal_added = 1;
+ base->sig.ev_signal_added = 0;
+ }
+ if (base->sig.ev_signal_pair[0] != -1)
+ EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
+ if (base->sig.ev_signal_pair[1] != -1)
+ EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+ if (base->th_notify_fn != NULL) {
+ was_notifiable = 1;
+ base->th_notify_fn = NULL;
+ }
+ if (base->th_notify_fd[0] != -1) {
+ event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
+ if (base->th_notify_fd[1] != -1)
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+ event_debug_unassign(&base->th_notify);
+ }
+
+ /* Replace the original evsel. */
+ base->evsel = evsel;
+
+ if (evsel->need_reinit) {
+ /* Reconstruct the backend through brute-force, so that we do
+ * not share any structures with the parent process. For some
+ * backends, this is necessary: epoll and kqueue, for
+ * instance, have events associated with a kernel
+		 * structure. If we didn't reinitialize, we'd share that
+ * structure with the parent process, and any changes made by
+ * the parent would affect our backend's behavior (and vice
+ * versa).
+ */
+ if (base->evsel->dealloc != NULL)
+ base->evsel->dealloc(base);
+ base->evbase = evsel->init(base);
+ if (base->evbase == NULL) {
+ event_errx(1,
+ "%s: could not reinitialize event mechanism",
+ __func__);
+ res = -1;
+ goto done;
+ }
+
+ /* Empty out the changelist (if any): we are starting from a
+ * blank slate. */
+ event_changelist_freemem_(&base->changelist);
+
+ /* Tell the event maps to re-inform the backend about all
+ * pending events. This will make the signal notification
+ * event get re-created if necessary. */
+ if (evmap_reinit_(base) < 0)
+ res = -1;
+ } else {
+ res = evsig_init_(base);
+ if (res == 0 && had_signal_added) {
+ res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
+ if (res == 0)
+ base->sig.ev_signal_added = 1;
+ }
+ }
+
+ /* If we were notifiable before, and nothing just exploded, become
+ * notifiable again. */
+ if (was_notifiable && res == 0)
+ res = evthread_make_base_notifiable_nolock_(base);
+
+done:
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (res);
+}
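+
+/* Illustrative sketch (not part of this file's logic): a forked child that
+ * wants to keep using an inherited base is expected to reinitialize it first:
+ *
+ *     pid_t pid = fork();
+ *     if (pid == 0) {
+ *             if (event_reinit(base) == -1)
+ *                     exit(1);
+ *             event_base_dispatch(base);
+ *     }
+ */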
+
+/* Get the monotonic time for this event_base's timer */
+int
+event_gettime_monotonic(struct event_base *base, struct timeval *tv)
+{
+ int rv = -1;
+
+ if (base && tv) {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ }
+
+ return rv;
+}
+
+const char **
+event_get_supported_methods(void)
+{
+ static const char **methods = NULL;
+ const struct eventop **method;
+ const char **tmp;
+ int i = 0, k;
+
+ /* count all methods */
+ for (method = &eventops[0]; *method != NULL; ++method) {
+ ++i;
+ }
+
+	/* allocate one slot more than we need, for the terminating NULL pointer */
+ tmp = mm_calloc((i + 1), sizeof(char *));
+ if (tmp == NULL)
+ return (NULL);
+
+ /* populate the array with the supported methods */
+ for (k = 0, i = 0; eventops[k] != NULL; ++k) {
+ tmp[i++] = eventops[k]->name;
+ }
+ tmp[i] = NULL;
+
+ if (methods != NULL)
+ mm_free((char**)methods);
+
+ methods = tmp;
+
+ return (methods);
+}
+
+struct event_config *
+event_config_new(void)
+{
+ struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
+
+ if (cfg == NULL)
+ return (NULL);
+
+ TAILQ_INIT(&cfg->entries);
+ cfg->max_dispatch_interval.tv_sec = -1;
+ cfg->max_dispatch_callbacks = INT_MAX;
+ cfg->limit_callbacks_after_prio = 1;
+
+ return (cfg);
+}
+
+static void
+event_config_entry_free(struct event_config_entry *entry)
+{
+ if (entry->avoid_method != NULL)
+ mm_free((char *)entry->avoid_method);
+ mm_free(entry);
+}
+
+void
+event_config_free(struct event_config *cfg)
+{
+ struct event_config_entry *entry;
+
+ while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
+ TAILQ_REMOVE(&cfg->entries, entry, next);
+ event_config_entry_free(entry);
+ }
+ mm_free(cfg);
+}
+
+int
+event_config_set_flag(struct event_config *cfg, int flag)
+{
+ if (!cfg)
+ return -1;
+ cfg->flags |= flag;
+ return 0;
+}
+
+int
+event_config_avoid_method(struct event_config *cfg, const char *method)
+{
+ struct event_config_entry *entry = mm_malloc(sizeof(*entry));
+ if (entry == NULL)
+ return (-1);
+
+ if ((entry->avoid_method = mm_strdup(method)) == NULL) {
+ mm_free(entry);
+ return (-1);
+ }
+
+ TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
+
+ return (0);
+}
+
+int
+event_config_require_features(struct event_config *cfg,
+ int features)
+{
+ if (!cfg)
+ return (-1);
+ cfg->require_features = features;
+ return (0);
+}
+
+int
+event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
+{
+ if (!cfg)
+ return (-1);
+ cfg->n_cpus_hint = cpus;
+ return (0);
+}
+
+int
+event_config_set_max_dispatch_interval(struct event_config *cfg,
+ const struct timeval *max_interval, int max_callbacks, int min_priority)
+{
+ if (max_interval)
+ memcpy(&cfg->max_dispatch_interval, max_interval,
+ sizeof(struct timeval));
+ else
+ cfg->max_dispatch_interval.tv_sec = -1;
+ cfg->max_dispatch_callbacks =
+ max_callbacks >= 0 ? max_callbacks : INT_MAX;
+ if (min_priority < 0)
+ min_priority = 0;
+ cfg->limit_callbacks_after_prio = min_priority;
+ return (0);
+}
+
+int
+event_priority_init(int npriorities)
+{
+ return event_base_priority_init(current_base, npriorities);
+}
+
+int
+event_base_priority_init(struct event_base *base, int npriorities)
+{
+ int i, r;
+ r = -1;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
+ || npriorities >= EVENT_MAX_PRIORITIES)
+ goto err;
+
+ if (npriorities == base->nactivequeues)
+ goto ok;
+
+ if (base->nactivequeues) {
+ mm_free(base->activequeues);
+ base->nactivequeues = 0;
+ }
+
+ /* Allocate our priority queues */
+ base->activequeues = (struct evcallback_list *)
+ mm_calloc(npriorities, sizeof(struct evcallback_list));
+ if (base->activequeues == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto err;
+ }
+ base->nactivequeues = npriorities;
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ TAILQ_INIT(&base->activequeues[i]);
+ }
+
+ok:
+ r = 0;
+err:
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (r);
+}
+
+int
+event_base_get_npriorities(struct event_base *base)
+{
+
+ int n;
+ if (base == NULL)
+ base = current_base;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ n = base->nactivequeues;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (n);
+}
+
+int
+event_base_get_num_events(struct event_base *base, unsigned int type)
+{
+ int r = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (type & EVENT_BASE_COUNT_ACTIVE)
+ r += base->event_count_active;
+
+ if (type & EVENT_BASE_COUNT_VIRTUAL)
+ r += base->virtual_event_count;
+
+ if (type & EVENT_BASE_COUNT_ADDED)
+ r += base->event_count;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return r;
+}
+
+int
+event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
+{
+ int r = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (type & EVENT_BASE_COUNT_ACTIVE) {
+ r += base->event_count_active_max;
+ if (clear)
+ base->event_count_active_max = 0;
+ }
+
+ if (type & EVENT_BASE_COUNT_VIRTUAL) {
+ r += base->virtual_event_count_max;
+ if (clear)
+ base->virtual_event_count_max = 0;
+ }
+
+ if (type & EVENT_BASE_COUNT_ADDED) {
+ r += base->event_count_max;
+ if (clear)
+ base->event_count_max = 0;
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return r;
+}
+
+/* Returns true iff we're currently watching any events. */
+static int
+event_haveevents(struct event_base *base)
+{
+ /* Caller must hold th_base_lock */
+ return (base->virtual_event_count > 0 || base->event_count > 0);
+}
+
+/* "closure" function called when processing active signal events */
+static inline void
+event_signal_closure(struct event_base *base, struct event *ev)
+{
+ short ncalls;
+ int should_break;
+
+ /* Allows deletes to work */
+ ncalls = ev->ev_ncalls;
+ if (ncalls != 0)
+ ev->ev_pncalls = &ncalls;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ while (ncalls) {
+ ncalls--;
+ ev->ev_ncalls = ncalls;
+ if (ncalls == 0)
+ ev->ev_pncalls = NULL;
+ (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ should_break = base->event_break;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ if (should_break) {
+ if (ncalls != 0)
+ ev->ev_pncalls = NULL;
+ return;
+ }
+ }
+}
+
+/* Common timeouts are special timeouts that are handled as queues rather than
+ * in the minheap. This is more efficient than the minheap if we happen to
+ * know that we're going to get several thousands of timeout events all with
+ * the same timeout value.
+ *
+ * Since all our timeout handling code assumes timevals can be copied,
+ * assigned, etc, we can't use "magic pointer" values to encode these common
+ * timeouts. Searching through a list to see if every timeout is common could
+ * also get inefficient. Instead, we take advantage of the fact that tv_usec
+ * is 32 bits long, but only uses 20 of those bits (since it can never be over
+ * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
+ * of index into the event_base's array of common timeouts.
+ */
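+
+/* Illustrative sketch (not part of this file's logic): callers opt in through
+ * the public API; the returned timeval carries the encoded magic bits, and
+ * many events can then share that one duration (NULL check omitted):
+ *
+ *     struct timeval five_sec = { 5, 0 };
+ *     const struct timeval *common =
+ *         event_base_init_common_timeout(base, &five_sec);
+ *     event_add(ev, common);
+ */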
+
+#define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
+#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
+#define COMMON_TIMEOUT_IDX_SHIFT 20
+#define COMMON_TIMEOUT_MASK 0xf0000000
+#define COMMON_TIMEOUT_MAGIC 0x50000000
+
+#define COMMON_TIMEOUT_IDX(tv) \
+ (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
+
+/** Return true iff 'tv' is a common timeout in 'base' */
+static inline int
+is_common_timeout(const struct timeval *tv,
+ const struct event_base *base)
+{
+ int idx;
+ if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
+ return 0;
+ idx = COMMON_TIMEOUT_IDX(tv);
+ return idx < base->n_common_timeouts;
+}
+
+/* True iff tv1 and tv2 have the same common-timeout index, or if neither
+ * one is a common timeout. */
+static inline int
+is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
+ (tv2->tv_usec & ~MICROSECONDS_MASK);
+}
+
+/** Requires that 'tv' is a common timeout. Return the corresponding
+ * common_timeout_list. */
+static inline struct common_timeout_list *
+get_common_timeout_list(struct event_base *base, const struct timeval *tv)
+{
+ return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
+}
+
+#if 0
+static inline int
+common_timeout_ok(const struct timeval *tv,
+ struct event_base *base)
+{
+ const struct timeval *expect =
+ &get_common_timeout_list(base, tv)->duration;
+ return tv->tv_sec == expect->tv_sec &&
+ tv->tv_usec == expect->tv_usec;
+}
+#endif
+
+/* Add the timeout for the first event in the given common timeout list to the
+ * event_base's minheap. */
+static void
+common_timeout_schedule(struct common_timeout_list *ctl,
+ const struct timeval *now, struct event *head)
+{
+ struct timeval timeout = head->ev_timeout;
+ timeout.tv_usec &= MICROSECONDS_MASK;
+ event_add_nolock_(&ctl->timeout_event, &timeout, 1);
+}
+
+/* Callback: invoked when the timeout for a common timeout queue triggers.
+ * This means that (at least) the first event in that queue should be run,
+ * and the timeout should be rescheduled if there are more events. */
+static void
+common_timeout_callback(evutil_socket_t fd, short what, void *arg)
+{
+ struct timeval now;
+ struct common_timeout_list *ctl = arg;
+ struct event_base *base = ctl->base;
+ struct event *ev = NULL;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ gettime(base, &now);
+ while (1) {
+ ev = TAILQ_FIRST(&ctl->events);
+ if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
+ (ev->ev_timeout.tv_sec == now.tv_sec &&
+ (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
+ break;
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ event_active_nolock_(ev, EV_TIMEOUT, 1);
+ }
+ if (ev)
+ common_timeout_schedule(ctl, &now, ev);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+#define MAX_COMMON_TIMEOUTS 256
+
+const struct timeval *
+event_base_init_common_timeout(struct event_base *base,
+ const struct timeval *duration)
+{
+ int i;
+ struct timeval tv;
+ const struct timeval *result=NULL;
+ struct common_timeout_list *new_ctl;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (duration->tv_usec > 1000000) {
+ memcpy(&tv, duration, sizeof(struct timeval));
+ if (is_common_timeout(duration, base))
+ tv.tv_usec &= MICROSECONDS_MASK;
+ tv.tv_sec += tv.tv_usec / 1000000;
+ tv.tv_usec %= 1000000;
+ duration = &tv;
+ }
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ const struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ if (duration->tv_sec == ctl->duration.tv_sec &&
+ duration->tv_usec ==
+ (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
+ EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
+ result = &ctl->duration;
+ goto done;
+ }
+ }
+ if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
+ event_warnx("%s: Too many common timeouts already in use; "
+ "we only support %d per event_base", __func__,
+ MAX_COMMON_TIMEOUTS);
+ goto done;
+ }
+ if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
+ int n = base->n_common_timeouts < 16 ? 16 :
+ base->n_common_timeouts*2;
+ struct common_timeout_list **newqueues =
+ mm_realloc(base->common_timeout_queues,
+ n*sizeof(struct common_timeout_queue *));
+ if (!newqueues) {
+ event_warn("%s: realloc",__func__);
+ goto done;
+ }
+ base->n_common_timeouts_allocated = n;
+ base->common_timeout_queues = newqueues;
+ }
+ new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
+ if (!new_ctl) {
+ event_warn("%s: calloc",__func__);
+ goto done;
+ }
+ TAILQ_INIT(&new_ctl->events);
+ new_ctl->duration.tv_sec = duration->tv_sec;
+ new_ctl->duration.tv_usec =
+ duration->tv_usec | COMMON_TIMEOUT_MAGIC |
+ (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
+ evtimer_assign(&new_ctl->timeout_event, base,
+ common_timeout_callback, new_ctl);
+ new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&new_ctl->timeout_event, 0);
+ new_ctl->base = base;
+ base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
+ result = &new_ctl->duration;
+
+done:
+ if (result)
+ EVUTIL_ASSERT(is_common_timeout(result, base));
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return result;
+}
+
+/* Closure function invoked when we're activating a persistent event. */
+static inline void
+event_persist_closure(struct event_base *base, struct event *ev)
+{
+ void (*evcb_callback)(evutil_socket_t, short, void *);
+
+ // Other fields of *ev that must be stored before executing
+ evutil_socket_t evcb_fd;
+ short evcb_res;
+ void *evcb_arg;
+
+ /* reschedule the persistent event if we have a timeout. */
+ if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
+ /* If there was a timeout, we want it to run at an interval of
+ * ev_io_timeout after the last time it was _scheduled_ for,
+ * not ev_io_timeout after _now_. If it fired for another
+ * reason, though, the timeout ought to start ticking _now_. */
+ struct timeval run_at, relative_to, delay, now;
+ ev_uint32_t usec_mask = 0;
+ EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
+ &ev->ev_io_timeout));
+ gettime(base, &now);
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ delay = ev->ev_io_timeout;
+ usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
+ delay.tv_usec &= MICROSECONDS_MASK;
+ if (ev->ev_res & EV_TIMEOUT) {
+ relative_to = ev->ev_timeout;
+ relative_to.tv_usec &= MICROSECONDS_MASK;
+ } else {
+ relative_to = now;
+ }
+ } else {
+ delay = ev->ev_io_timeout;
+ if (ev->ev_res & EV_TIMEOUT) {
+ relative_to = ev->ev_timeout;
+ } else {
+ relative_to = now;
+ }
+ }
+ evutil_timeradd(&relative_to, &delay, &run_at);
+ if (evutil_timercmp(&run_at, &now, <)) {
+ /* Looks like we missed at least one invocation due to
+ * a clock jump, not running the event loop for a
+ * while, really slow callbacks, or
+ * something. Reschedule relative to now.
+ */
+ evutil_timeradd(&now, &delay, &run_at);
+ }
+ run_at.tv_usec |= usec_mask;
+ event_add_nolock_(ev, &run_at, 1);
+ }
+
+ // Save our callback before we release the lock
+ evcb_callback = ev->ev_callback;
+ evcb_fd = ev->ev_fd;
+ evcb_res = ev->ev_res;
+ evcb_arg = ev->ev_arg;
+
+ // Release the lock
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ // Execute the callback
+ (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
+}
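+
+/* Illustrative sketch (not part of this file's logic): the closure above is
+ * what re-arms an event added like this; when it fires on timeout it is
+ * rescheduled relative to the previous expiry rather than relative to now
+ * ('cb' and 'arg' are placeholders):
+ *
+ *     struct timeval one_sec = { 1, 0 };
+ *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, arg);
+ *     event_add(ev, &one_sec);
+ */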
+
+/*
+ Helper for event_process_active to process all the events in a single queue,
+ releasing the lock as we go. This function requires that the lock be held
+ when it's invoked. Returns -1 if we get a signal or an event_break that
+ means we should stop processing any active events now. Otherwise returns
+ the number of non-internal event_callbacks that we processed.
+*/
+static int
+event_process_active_single_queue(struct event_base *base,
+ struct evcallback_list *activeq,
+ int max_to_process, const struct timeval *endtime)
+{
+ struct event_callback *evcb;
+ int count = 0;
+
+ EVUTIL_ASSERT(activeq != NULL);
+
+ for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
+ struct event *ev=NULL;
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ ev = event_callback_to_event(evcb);
+
+ if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
+ event_queue_remove_active(base, evcb);
+ else
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ event_debug((
+ "event_process_active: event: %p, %s%s%scall %p",
+ ev,
+ ev->ev_res & EV_READ ? "EV_READ " : " ",
+ ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
+ ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
+ ev->ev_callback));
+ } else {
+ event_queue_remove_active(base, evcb);
+ event_debug(("event_process_active: event_callback %p, "
+ "closure %d, call %p",
+ evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
+ }
+
+ if (!(evcb->evcb_flags & EVLIST_INTERNAL))
+ ++count;
+
+
+ base->current_event = evcb;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ base->current_event_waiters = 0;
+#endif
+
+ switch (evcb->evcb_closure) {
+ case EV_CLOSURE_EVENT_SIGNAL:
+ EVUTIL_ASSERT(ev != NULL);
+ event_signal_closure(base, ev);
+ break;
+ case EV_CLOSURE_EVENT_PERSIST:
+ EVUTIL_ASSERT(ev != NULL);
+ event_persist_closure(base, ev);
+ break;
+ case EV_CLOSURE_EVENT: {
+ void (*evcb_callback)(evutil_socket_t, short, void *);
+ EVUTIL_ASSERT(ev != NULL);
+ evcb_callback = *ev->ev_callback;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
+ }
+ break;
+ case EV_CLOSURE_CB_SELF: {
+ void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_selfcb(evcb, evcb->evcb_arg);
+ }
+ break;
+ case EV_CLOSURE_EVENT_FINALIZE:
+ case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+ void (*evcb_evfinalize)(struct event *, void *);
+ int evcb_closure = evcb->evcb_closure;
+ EVUTIL_ASSERT(ev != NULL);
+ base->current_event = NULL;
+ evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_evfinalize(ev, ev->ev_arg);
+ event_debug_note_teardown_(ev);
+ if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+ mm_free(ev);
+ }
+ break;
+ case EV_CLOSURE_CB_FINALIZE: {
+ void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
+ base->current_event = NULL;
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_cbfinalize(evcb, evcb->evcb_arg);
+ }
+ break;
+ default:
+ EVUTIL_ASSERT(0);
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->current_event = NULL;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event_waiters) {
+ base->current_event_waiters = 0;
+ EVTHREAD_COND_BROADCAST(base->current_event_cond);
+ }
+#endif
+
+ if (base->event_break)
+ return -1;
+ if (count >= max_to_process)
+ return count;
+ if (count && endtime) {
+ struct timeval now;
+ update_time_cache(base);
+ gettime(base, &now);
+ if (evutil_timercmp(&now, endtime, >=))
+ return count;
+ }
+ if (base->event_continue)
+ break;
+ }
+ return count;
+}
+
+/*
+ * Active events are stored in priority queues. Lower priorities are always
+ * processed before higher priorities. Low priority events can starve high
+ * priority ones.
+ */
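+
+/* Illustrative sketch (not part of this file's logic): priorities are
+ * configured per base and per event:
+ *
+ *     event_base_priority_init(base, 3);
+ *     event_priority_set(ev, 0);
+ *
+ * gives three queues (0..2), with events at priority 0 processed first.
+ */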
+
+static int
+event_process_active(struct event_base *base)
+{
+ /* Caller must hold th_base_lock */
+ struct evcallback_list *activeq = NULL;
+ int i, c = 0;
+ const struct timeval *endtime;
+ struct timeval tv;
+ const int maxcb = base->max_dispatch_callbacks;
+ const int limit_after_prio = base->limit_callbacks_after_prio;
+ if (base->max_dispatch_time.tv_sec >= 0) {
+ update_time_cache(base);
+ gettime(base, &tv);
+ evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
+ endtime = &tv;
+ } else {
+ endtime = NULL;
+ }
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
+ base->event_running_priority = i;
+ activeq = &base->activequeues[i];
+ if (i < limit_after_prio)
+ c = event_process_active_single_queue(base, activeq,
+ INT_MAX, NULL);
+ else
+ c = event_process_active_single_queue(base, activeq,
+ maxcb, endtime);
+ if (c < 0) {
+ goto done;
+ } else if (c > 0)
+ break; /* Processed a real event; do not
+ * consider lower-priority events */
+ /* If we get here, all of the events we processed
+ * were internal. Continue. */
+ }
+ }
+
+done:
+ base->event_running_priority = -1;
+
+ return c;
+}
+
+/*
+ * Wait continuously for events. We exit only if no events are left.
+ */
+
+int
+event_dispatch(void)
+{
+ return (event_loop(0));
+}
+
+int
+event_base_dispatch(struct event_base *event_base)
+{
+ return (event_base_loop(event_base, 0));
+}
+
+const char *
+event_base_get_method(const struct event_base *base)
+{
+ EVUTIL_ASSERT(base);
+ return (base->evsel->name);
+}
+
+/** Callback: used to implement event_base_loopexit by telling the event_base
+ * that it's time to exit its loop. */
+static void
+event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct event_base *base = arg;
+ base->event_gotterm = 1;
+}
+
+int
+event_loopexit(const struct timeval *tv)
+{
+ return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
+ current_base, tv));
+}
+
+int
+event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
+{
+ return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
+ event_base, tv));
+}
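+
+/* Illustrative sketch (not part of this file's logic): loopexit lets the
+ * currently active callbacks finish (optionally after a delay), whereas
+ * loopbreak below stops right after the callback that is running now:
+ *
+ *     struct timeval ten_sec = { 10, 0 };
+ *     event_base_loopexit(base, &ten_sec);
+ *
+ * or
+ *
+ *     event_base_loopbreak(base);
+ */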
+
+int
+event_loopbreak(void)
+{
+ return (event_base_loopbreak(current_base));
+}
+
+int
+event_base_loopbreak(struct event_base *event_base)
+{
+ int r = 0;
+ if (event_base == NULL)
+ return (-1);
+
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ event_base->event_break = 1;
+
+ if (EVBASE_NEED_NOTIFY(event_base)) {
+ r = evthread_notify_base(event_base);
+ } else {
+ r = (0);
+ }
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return r;
+}
+
+int
+event_base_loopcontinue(struct event_base *event_base)
+{
+ int r = 0;
+ if (event_base == NULL)
+ return (-1);
+
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ event_base->event_continue = 1;
+
+ if (EVBASE_NEED_NOTIFY(event_base)) {
+ r = evthread_notify_base(event_base);
+ } else {
+ r = (0);
+ }
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return r;
+}
+
+int
+event_base_got_break(struct event_base *event_base)
+{
+ int res;
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ res = event_base->event_break;
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return res;
+}
+
+int
+event_base_got_exit(struct event_base *event_base)
+{
+ int res;
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ res = event_base->event_gotterm;
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return res;
+}
+
+/* not thread safe */
+
+int
+event_loop(int flags)
+{
+ return event_base_loop(current_base, flags);
+}
+
+int
+event_base_loop(struct event_base *base, int flags)
+{
+ const struct eventop *evsel = base->evsel;
+ struct timeval tv;
+ struct timeval *tv_p;
+ int res, done, retval = 0;
+
+ /* Grab the lock. We will release it inside evsel.dispatch, and again
+ * as we invoke user callbacks. */
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (base->running_loop) {
+ event_warnx("%s: reentrant invocation. Only one event_base_loop"
+ " can run on each event_base at once.", __func__);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return -1;
+ }
+
+ base->running_loop = 1;
+
+ clear_time_cache(base);
+
+ if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
+ evsig_set_base_(base);
+
+ done = 0;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ base->th_owner_id = EVTHREAD_GET_ID();
+#endif
+
+ base->event_gotterm = base->event_break = 0;
+
+ while (!done) {
+ base->event_continue = 0;
+ base->n_deferreds_queued = 0;
+
+ /* Terminate the loop if we have been asked to */
+ if (base->event_gotterm) {
+ break;
+ }
+
+ if (base->event_break) {
+ break;
+ }
+
+ tv_p = &tv;
+ if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
+ timeout_next(base, &tv_p);
+ } else {
+ /*
+ * if we have active events, we just poll new events
+ * without waiting.
+ */
+ evutil_timerclear(&tv);
+ }
+
+ /* If we have no events, we just exit */
+ if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
+ !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
+ event_debug(("%s: no events registered.", __func__));
+ retval = 1;
+ goto done;
+ }
+
+ event_queue_make_later_events_active(base);
+
+ clear_time_cache(base);
+
+ res = evsel->dispatch(base, tv_p);
+
+ if (res == -1) {
+ event_debug(("%s: dispatch returned unsuccessfully.",
+ __func__));
+ retval = -1;
+ goto done;
+ }
+
+ update_time_cache(base);
+
+ timeout_process(base);
+
+ if (N_ACTIVE_CALLBACKS(base)) {
+ int n = event_process_active(base);
+ if ((flags & EVLOOP_ONCE)
+ && N_ACTIVE_CALLBACKS(base) == 0
+ && n != 0)
+ done = 1;
+ } else if (flags & EVLOOP_NONBLOCK)
+ done = 1;
+ }
+ event_debug(("%s: asked to terminate loop.", __func__));
+
+done:
+ clear_time_cache(base);
+ base->running_loop = 0;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return (retval);
+}
+
+/* One-time callback to implement event_base_once: invokes the user callback,
+ * then deletes the allocated storage */
+static void
+event_once_cb(evutil_socket_t fd, short events, void *arg)
+{
+ struct event_once *eonce = arg;
+
+ (*eonce->cb)(fd, events, eonce->arg);
+ EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
+ LIST_REMOVE(eonce, next_once);
+ EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
+ event_debug_unassign(&eonce->ev);
+ mm_free(eonce);
+}
+
+/* not threadsafe, event scheduled once. */
+int
+event_once(evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *),
+ void *arg, const struct timeval *tv)
+{
+ return event_base_once(current_base, fd, events, callback, arg, tv);
+}
+
+/* Schedules an event once */
+int
+event_base_once(struct event_base *base, evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *),
+ void *arg, const struct timeval *tv)
+{
+ struct event_once *eonce;
+ int res = 0;
+ int activate = 0;
+
+ /* We cannot support signals that just fire once, or persistent
+ * events. */
+ if (events & (EV_SIGNAL|EV_PERSIST))
+ return (-1);
+
+ if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
+ return (-1);
+
+ eonce->cb = callback;
+ eonce->arg = arg;
+
+ if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
+ evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
+
+ if (tv == NULL || ! evutil_timerisset(tv)) {
+ /* If the event is going to become active immediately,
+ * don't put it on the timeout queue. This is one
+ * idiom for scheduling a callback, so let's make
+ * it fast (and order-preserving). */
+ activate = 1;
+ }
+ } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
+ events &= EV_READ|EV_WRITE|EV_CLOSED;
+
+ event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
+ } else {
+ /* Bad event combination */
+ mm_free(eonce);
+ return (-1);
+ }
+
+ if (res == 0) {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (activate)
+ event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
+ else
+ res = event_add_nolock_(&eonce->ev, tv, 0);
+
+ if (res != 0) {
+ mm_free(eonce);
+ return (res);
+ } else {
+ LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ }
+
+ return (0);
+}
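+
+/* Illustrative sketch (not part of this file's logic): a one-shot timer
+ * without managing a struct event by hand; the internal storage is freed
+ * after the callback runs ('once_cb' and 'arg' are placeholders):
+ *
+ *     struct timeval half_sec = { 0, 500000 };
+ *     event_base_once(base, -1, EV_TIMEOUT, once_cb, arg, &half_sec);
+ */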
+
+int
+event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
+{
+ if (!base)
+ base = current_base;
+ if (arg == &event_self_cbarg_ptr_)
+ arg = ev;
+
+ event_debug_assert_not_added_(ev);
+
+ ev->ev_base = base;
+
+ ev->ev_callback = callback;
+ ev->ev_arg = arg;
+ ev->ev_fd = fd;
+ ev->ev_events = events;
+ ev->ev_res = 0;
+ ev->ev_flags = EVLIST_INIT;
+ ev->ev_ncalls = 0;
+ ev->ev_pncalls = NULL;
+
+ if (events & EV_SIGNAL) {
+ if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
+ event_warnx("%s: EV_SIGNAL is not compatible with "
+ "EV_READ, EV_WRITE or EV_CLOSED", __func__);
+ return -1;
+ }
+ ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
+ } else {
+ if (events & EV_PERSIST) {
+ evutil_timerclear(&ev->ev_io_timeout);
+ ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
+ } else {
+ ev->ev_closure = EV_CLOSURE_EVENT;
+ }
+ }
+
+ min_heap_elem_init_(ev);
+
+ if (base != NULL) {
+ /* by default, we put new events into the middle priority */
+ ev->ev_pri = base->nactivequeues / 2;
+ }
+
+ event_debug_note_setup_(ev);
+
+ return 0;
+}
+
+int
+event_base_set(struct event_base *base, struct event *ev)
+{
+ /* Only innocent events may be assigned to a different base */
+ if (ev->ev_flags != EVLIST_INIT)
+ return (-1);
+
+ event_debug_assert_is_setup_(ev);
+
+ ev->ev_base = base;
+ ev->ev_pri = base->nactivequeues/2;
+
+ return (0);
+}
+
+void
+event_set(struct event *ev, evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *), void *arg)
+{
+ int r;
+ r = event_assign(ev, current_base, fd, events, callback, arg);
+ EVUTIL_ASSERT(r == 0);
+}
+
+void *
+event_self_cbarg(void)
+{
+ return &event_self_cbarg_ptr_;
+}
+
+struct event *
+event_base_get_running_event(struct event_base *base)
+{
+ struct event *ev = NULL;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (EVBASE_IN_THREAD(base)) {
+ struct event_callback *evcb = base->current_event;
+ if (evcb->evcb_flags & EVLIST_INIT)
+ ev = event_callback_to_event(evcb);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return ev;
+}
+
+struct event *
+event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
+{
+ struct event *ev;
+ ev = mm_malloc(sizeof(struct event));
+ if (ev == NULL)
+ return (NULL);
+ if (event_assign(ev, base, fd, events, cb, arg) < 0) {
+ mm_free(ev);
+ return (NULL);
+ }
+
+ return (ev);
+}
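+
+/* Typical heap-allocated lifecycle, pairing event_new() with event_free();
+ * on_read and ctx are hypothetical:
+ *
+ *     struct event *rd = event_new(base, fd, EV_READ|EV_PERSIST, on_read, ctx);
+ *     event_add(rd, NULL);
+ *     ...
+ *     event_free(rd);    (event_free deletes the event before freeing it)
+ */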
+
+void
+event_free(struct event *ev)
+{
+	/* This is disabled so that events which have been finalized can
+	 * still be a valid target for event_free(). */
+ // event_debug_assert_is_setup_(ev);
+
+ /* make sure that this event won't be coming back to haunt us. */
+ event_del(ev);
+ event_debug_note_teardown_(ev);
+ mm_free(ev);
+
+}
+
+void
+event_debug_unassign(struct event *ev)
+{
+ event_debug_assert_not_added_(ev);
+ event_debug_note_teardown_(ev);
+
+ ev->ev_flags &= ~EVLIST_INIT;
+}
+
+#define EVENT_FINALIZE_FREE_ 0x10000
+static int
+event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
+ EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
+
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ ev->ev_closure = closure;
+ ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
+ event_active_nolock_(ev, EV_FINALIZE, 1);
+ ev->ev_flags |= EVLIST_FINALIZING;
+ return 0;
+}
+
+static int
+event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ int r;
+ struct event_base *base = ev->ev_base;
+ if (EVUTIL_FAILURE_CHECK(!base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_finalize_nolock_(base, flags, ev, cb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ return event_finalize_impl_(flags, ev, cb);
+}
+
+int
+event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
+}
+
+void
+event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+ struct event *ev = NULL;
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ ev = event_callback_to_event(evcb);
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ } else {
+ event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
+ }
+
+ evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
+ evcb->evcb_cb_union.evcb_cbfinalize = cb;
+ event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
+ evcb->evcb_flags |= EVLIST_FINALIZING;
+}
+
+void
+event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_callback_finalize_nolock_(base, flags, evcb, cb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
+ * callback will be invoked on *one of them*, after they have *all* been
+ * finalized. */
+int
+event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
+{
+ int n_pending = 0, i;
+
+ if (base == NULL)
+ base = current_base;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ event_debug(("%s: %d events finalizing", __func__, n_cbs));
+
+ /* At most one can be currently executing; the rest we just
+ * cancel... But we always make sure that the finalize callback
+ * runs. */
+ for (i = 0; i < n_cbs; ++i) {
+ struct event_callback *evcb = evcbs[i];
+ if (evcb == base->current_event) {
+ event_callback_finalize_nolock_(base, 0, evcb, cb);
+ ++n_pending;
+ } else {
+ event_callback_cancel_nolock_(base, evcb, 0);
+ }
+ }
+
+ if (n_pending == 0) {
+ /* Just do the first one. */
+ event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return 0;
+}
+
+/*
+ * Sets the priority of an event; if the event is already active,
+ * changing the priority will fail.
+ */
+
+int
+event_priority_set(struct event *ev, int pri)
+{
+ event_debug_assert_is_setup_(ev);
+
+ if (ev->ev_flags & EVLIST_ACTIVE)
+ return (-1);
+ if (pri < 0 || pri >= ev->ev_base->nactivequeues)
+ return (-1);
+
+ ev->ev_pri = pri;
+
+ return (0);
+}
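+
+/* Usage sketch: priorities must first be configured on the base, and the
+ * priority should be set before the event is added; 0 is the most urgent:
+ *
+ *     event_base_priority_init(base, 3);
+ *     struct event *ev = event_new(base, fd, EV_READ, cb, NULL);
+ *     event_priority_set(ev, 0);
+ *     event_add(ev, NULL);
+ */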
+
+/*
+ * Checks if a specific event is pending or scheduled.
+ */
+
+int
+event_pending(const struct event *ev, short event, struct timeval *tv)
+{
+ int flags = 0;
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return 0;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+ event_debug_assert_is_setup_(ev);
+
+ if (ev->ev_flags & EVLIST_INSERTED)
+ flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
+ if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
+ flags |= ev->ev_res;
+ if (ev->ev_flags & EVLIST_TIMEOUT)
+ flags |= EV_TIMEOUT;
+
+ event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
+
+ /* See if there is a timeout that we should report */
+ if (tv != NULL && (flags & event & EV_TIMEOUT)) {
+ struct timeval tmp = ev->ev_timeout;
+ tmp.tv_usec &= MICROSECONDS_MASK;
+		/* correctly remap to real time */
+ evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
+ }
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (flags & event);
+}
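+
+/* Usage sketch: the return value is the subset of the requested flags that
+ * are pending; if EV_TIMEOUT is requested and pending, *tv is filled with
+ * the time at which the timeout will expire:
+ *
+ *     struct timeval expiry;
+ *     if (event_pending(ev, EV_READ|EV_TIMEOUT, &expiry))
+ *         ... the event is still scheduled ...
+ */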
+
+int
+event_initialized(const struct event *ev)
+{
+ if (!(ev->ev_flags & EVLIST_INIT))
+ return 0;
+
+ return 1;
+}
+
+void
+event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
+{
+ event_debug_assert_is_setup_(event);
+
+ if (base_out)
+ *base_out = event->ev_base;
+ if (fd_out)
+ *fd_out = event->ev_fd;
+ if (events_out)
+ *events_out = event->ev_events;
+ if (callback_out)
+ *callback_out = event->ev_callback;
+ if (arg_out)
+ *arg_out = event->ev_arg;
+}
+
+size_t
+event_get_struct_event_size(void)
+{
+ return sizeof(struct event);
+}
+
+evutil_socket_t
+event_get_fd(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_fd;
+}
+
+struct event_base *
+event_get_base(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_base;
+}
+
+short
+event_get_events(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_events;
+}
+
+event_callback_fn
+event_get_callback(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_callback;
+}
+
+void *
+event_get_callback_arg(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_arg;
+}
+
+int
+event_get_priority(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_pri;
+}
+
+int
+event_add(struct event *ev, const struct timeval *tv)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_add_nolock_(ev, tv, 0);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
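+
+/* Usage sketch: tv is a relative timeout; passing NULL adds the event with
+ * no timeout:
+ *
+ *     struct timeval five_sec = { 5, 0 };
+ *     event_add(ev, &five_sec);
+ */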
+
+/* Helper callback: wake an event_base from another thread. This version
+ * works by writing a byte to one end of a socketpair, so that the event_base
+ * listening on the other end will wake up as the corresponding event
+ * triggers */
+static int
+evthread_notify_base_default(struct event_base *base)
+{
+ char buf[1];
+ int r;
+ buf[0] = (char) 0;
+#ifdef _WIN32
+ r = send(base->th_notify_fd[1], buf, 1, 0);
+#else
+ r = write(base->th_notify_fd[1], buf, 1);
+#endif
+ return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
+}
+
+#ifdef EVENT__HAVE_EVENTFD
+/* Helper callback: wake an event_base from another thread. This version
+ * assumes that you have a working eventfd() implementation. */
+static int
+evthread_notify_base_eventfd(struct event_base *base)
+{
+ ev_uint64_t msg = 1;
+ int r;
+ do {
+ r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
+ } while (r < 0 && errno == EAGAIN);
+
+ return (r < 0) ? -1 : 0;
+}
+#endif
+
+
+/** Tell the thread currently running the event_loop for base (if any) that it
+ * needs to stop waiting in its dispatch function (if it is) and process all
+ * active callbacks. */
+static int
+evthread_notify_base(struct event_base *base)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (!base->th_notify_fn)
+ return -1;
+ if (base->is_notify_pending)
+ return 0;
+ base->is_notify_pending = 1;
+ return base->th_notify_fn(base);
+}
+
+/* Implementation function to remove a timeout on a currently pending event.
+ */
+int
+event_remove_timer_nolock_(struct event *ev)
+{
+ struct event_base *base = ev->ev_base;
+
+ EVENT_BASE_ASSERT_LOCKED(base);
+ event_debug_assert_is_setup_(ev);
+
+ event_debug(("event_remove_timer_nolock: event: %p", ev));
+
+ /* If it's not pending on a timeout, we don't need to do anything. */
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ event_queue_remove_timeout(base, ev);
+ evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
+ }
+
+ return (0);
+}
+
+int
+event_remove_timer(struct event *ev)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_remove_timer_nolock_(ev);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
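+
+/* Usage sketch: drop only the timeout, leaving any I/O or signal part of the
+ * event pending:
+ *
+ *     struct timeval five_sec = { 5, 0 };
+ *     event_add(ev, &five_sec);
+ *     ...
+ *     event_remove_timer(ev);
+ */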
+
+/* Implementation function to add an event. Works just like event_add,
+ * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
+ * we treat tv as an absolute time, not as an interval to add to the current
+ * time */
+int
+event_add_nolock_(struct event *ev, const struct timeval *tv,
+ int tv_is_absolute)
+{
+ struct event_base *base = ev->ev_base;
+ int res = 0;
+ int notify = 0;
+
+ EVENT_BASE_ASSERT_LOCKED(base);
+ event_debug_assert_is_setup_(ev);
+
+ event_debug((
+ "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
+ ev,
+ EV_SOCK_ARG(ev->ev_fd),
+ ev->ev_events & EV_READ ? "EV_READ " : " ",
+ ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
+ ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
+ tv ? "EV_TIMEOUT " : " ",
+ ev->ev_callback));
+
+ EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
+
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX debug */
+ return (-1);
+ }
+
+ /*
+	 * Prepare for timeout insertion further below; if any step fails,
+	 * we should not change any state.
+ */
+ if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
+ if (min_heap_reserve_(&base->timeheap,
+ 1 + min_heap_size_(&base->timeheap)) == -1)
+ return (-1); /* ENOMEM == errno */
+ }
+
+ /* If the main thread is currently executing a signal event's
+ * callback, and we are not the main thread, then we want to wait
+ * until the callback is done before we mess with the event, or else
+ * we can race on ev_ncalls and ev_pncalls below. */
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event == event_to_event_callback(ev) &&
+ (ev->ev_events & EV_SIGNAL)
+ && !EVBASE_IN_THREAD(base)) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+
+ if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
+ !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+ res = evmap_io_add_(base, ev->ev_fd, ev);
+ else if (ev->ev_events & EV_SIGNAL)
+ res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
+ if (res != -1)
+ event_queue_insert_inserted(base, ev);
+ if (res == 1) {
+ /* evmap says we need to notify the main thread. */
+ notify = 1;
+ res = 0;
+ }
+ }
+
+ /*
+ * we should change the timeout state only if the previous event
+ * addition succeeded.
+ */
+ if (res != -1 && tv != NULL) {
+ struct timeval now;
+ int common_timeout;
+#ifdef USE_REINSERT_TIMEOUT
+ int was_common;
+ int old_timeout_idx;
+#endif
+
+ /*
+ * for persistent timeout events, we remember the
+ * timeout value and re-add the event.
+ *
+ * If tv_is_absolute, this was already set.
+ */
+ if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
+ ev->ev_io_timeout = *tv;
+
+#ifndef USE_REINSERT_TIMEOUT
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ event_queue_remove_timeout(base, ev);
+ }
+#endif
+
+ /* Check if it is active due to a timeout. Rescheduling
+ * this timeout before the callback can be executed
+ * removes it from the active list. */
+ if ((ev->ev_flags & EVLIST_ACTIVE) &&
+ (ev->ev_res & EV_TIMEOUT)) {
+ if (ev->ev_events & EV_SIGNAL) {
+ /* See if we are just active executing
+ * this event in a loop
+ */
+ if (ev->ev_ncalls && ev->ev_pncalls) {
+ /* Abort loop */
+ *ev->ev_pncalls = 0;
+ }
+ }
+
+ event_queue_remove_active(base, event_to_event_callback(ev));
+ }
+
+ gettime(base, &now);
+
+ common_timeout = is_common_timeout(tv, base);
+#ifdef USE_REINSERT_TIMEOUT
+ was_common = is_common_timeout(&ev->ev_timeout, base);
+ old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
+#endif
+
+ if (tv_is_absolute) {
+ ev->ev_timeout = *tv;
+ } else if (common_timeout) {
+ struct timeval tmp = *tv;
+ tmp.tv_usec &= MICROSECONDS_MASK;
+ evutil_timeradd(&now, &tmp, &ev->ev_timeout);
+ ev->ev_timeout.tv_usec |=
+ (tv->tv_usec & ~MICROSECONDS_MASK);
+ } else {
+ evutil_timeradd(&now, tv, &ev->ev_timeout);
+ }
+
+ event_debug((
+ "event_add: event %p, timeout in %d seconds %d useconds, call %p",
+ ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
+
+#ifdef USE_REINSERT_TIMEOUT
+ event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
+#else
+ event_queue_insert_timeout(base, ev);
+#endif
+
+ if (common_timeout) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ if (ev == TAILQ_FIRST(&ctl->events)) {
+ common_timeout_schedule(ctl, &now, ev);
+ }
+ } else {
+ struct event* top = NULL;
+ /* See if the earliest timeout is now earlier than it
+ * was before: if so, we will need to tell the main
+ * thread to wake up earlier than it would otherwise.
+ * We double check the timeout of the top element to
+ * handle time distortions due to system suspension.
+ */
+ if (min_heap_elt_is_top_(ev))
+ notify = 1;
+ else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
+ evutil_timercmp(&top->ev_timeout, &now, <))
+ notify = 1;
+ }
+ }
+
+ /* if we are not in the right thread, we need to wake up the loop */
+ if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ event_debug_note_add_(ev);
+
+ return (res);
+}
+
+static int
+event_del_(struct event *ev, int blocking)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_del_nolock_(ev, blocking);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
+
+int
+event_del(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_AUTOBLOCK);
+}
+
+int
+event_del_block(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_BLOCK);
+}
+
+int
+event_del_noblock(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_NOBLOCK);
+}
+
+/** Helper for event_del: always called with th_base_lock held.
+ *
+ * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
+ * EVEN_IF_FINALIZING} values. See those for more information.
+ */
+int
+event_del_nolock_(struct event *ev, int blocking)
+{
+ struct event_base *base;
+ int res = 0, notify = 0;
+
+ event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
+ ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
+
+ /* An event without a base has not been added */
+ if (ev->ev_base == NULL)
+ return (-1);
+
+ EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
+
+ if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX Debug */
+ return 0;
+ }
+ }
+
+ /* If the main thread is currently executing this event's callback,
+ * and we are not the main thread, then we want to wait until the
+ * callback is done before we start removing the event. That way,
+ * when this function returns, it will be safe to free the
+ * user-supplied argument. */
+ base = ev->ev_base;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (blocking != EVENT_DEL_NOBLOCK &&
+ base->current_event == event_to_event_callback(ev) &&
+ !EVBASE_IN_THREAD(base) &&
+ (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+
+ EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
+
+ /* See if we are just active executing this event in a loop */
+ if (ev->ev_events & EV_SIGNAL) {
+ if (ev->ev_ncalls && ev->ev_pncalls) {
+ /* Abort loop */
+ *ev->ev_pncalls = 0;
+ }
+ }
+
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ /* NOTE: We never need to notify the main thread because of a
+ * deleted timeout event: all that could happen if we don't is
+ * that the dispatch loop might wake up too early. But the
+ * point of notifying the main thread _is_ to wake up the
+ * dispatch loop early anyway, so we wouldn't gain anything by
+ * doing it.
+ */
+ event_queue_remove_timeout(base, ev);
+ }
+
+ if (ev->ev_flags & EVLIST_ACTIVE)
+ event_queue_remove_active(base, event_to_event_callback(ev));
+ else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
+ event_queue_remove_active_later(base, event_to_event_callback(ev));
+
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ event_queue_remove_inserted(base, ev);
+ if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+ res = evmap_io_del_(base, ev->ev_fd, ev);
+ else
+ res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
+ if (res == 1) {
+ /* evmap says we need to notify the main thread. */
+ notify = 1;
+ res = 0;
+ }
+ }
+
+ /* if we are not in the right thread, we need to wake up the loop */
+ if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ event_debug_note_del_(ev);
+
+ return (res);
+}
+
+void
+event_active(struct event *ev, int res, short ncalls)
+{
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ event_debug_assert_is_setup_(ev);
+
+ event_active_nolock_(ev, res, ncalls);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+}
+
+
+void
+event_active_nolock_(struct event *ev, int res, short ncalls)
+{
+ struct event_base *base;
+
+ event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
+ ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
+
+ base = ev->ev_base;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX debug */
+ return;
+ }
+
+ switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ default:
+ case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+ EVUTIL_ASSERT(0);
+ break;
+ case EVLIST_ACTIVE:
+ /* We get different kinds of events, add them together */
+ ev->ev_res |= res;
+ return;
+ case EVLIST_ACTIVE_LATER:
+ ev->ev_res |= res;
+ break;
+ case 0:
+ ev->ev_res = res;
+ break;
+ }
+
+ if (ev->ev_pri < base->event_running_priority)
+ base->event_continue = 1;
+
+ if (ev->ev_events & EV_SIGNAL) {
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event == event_to_event_callback(ev) &&
+ !EVBASE_IN_THREAD(base)) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+ ev->ev_ncalls = ncalls;
+ ev->ev_pncalls = NULL;
+ }
+
+ event_callback_activate_nolock_(base, event_to_event_callback(ev));
+}
+
+void
+event_active_later_(struct event *ev, int res)
+{
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+ event_active_later_nolock_(ev, res);
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+}
+
+void
+event_active_later_nolock_(struct event *ev, int res)
+{
+ struct event_base *base = ev->ev_base;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+ /* We get different kinds of events, add them together */
+ ev->ev_res |= res;
+ return;
+ }
+
+ ev->ev_res = res;
+
+ event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
+}
+
+int
+event_callback_activate_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_callback_activate_nolock_(base, evcb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_callback_activate_nolock_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r = 1;
+
+ if (evcb->evcb_flags & EVLIST_FINALIZING)
+ return 0;
+
+ switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+ default:
+ EVUTIL_ASSERT(0);
+ case EVLIST_ACTIVE_LATER:
+ event_queue_remove_active_later(base, evcb);
+ r = 0;
+ break;
+ case EVLIST_ACTIVE:
+ return 0;
+ case 0:
+ break;
+ }
+
+ event_queue_insert_active(base, evcb);
+
+ if (EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ return r;
+}
+
+int
+event_callback_activate_later_nolock_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
+ return 0;
+
+ event_queue_insert_active_later(base, evcb);
+ if (EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+ return 1;
+}
+
+void
+event_callback_init_(struct event_base *base,
+ struct event_callback *cb)
+{
+ memset(cb, 0, sizeof(*cb));
+ cb->evcb_pri = base->nactivequeues - 1;
+}
+
+int
+event_callback_cancel_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_callback_cancel_nolock_(base, evcb, 0);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_callback_cancel_nolock_(struct event_base *base,
+ struct event_callback *evcb, int even_if_finalizing)
+{
+ if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
+ return 0;
+
+ if (evcb->evcb_flags & EVLIST_INIT)
+ return event_del_nolock_(event_callback_to_event(evcb),
+ even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
+
+ switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ default:
+ case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+ EVUTIL_ASSERT(0);
+ break;
+ case EVLIST_ACTIVE:
+ /* We get different kinds of events, add them together */
+ event_queue_remove_active(base, evcb);
+ return 0;
+ case EVLIST_ACTIVE_LATER:
+ event_queue_remove_active_later(base, evcb);
+ break;
+ case 0:
+ break;
+ }
+
+ return 0;
+}
+
+void
+event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
+{
+ memset(cb, 0, sizeof(*cb));
+ cb->evcb_cb_union.evcb_selfcb = fn;
+ cb->evcb_arg = arg;
+ cb->evcb_pri = priority;
+ cb->evcb_closure = EV_CLOSURE_CB_SELF;
+}
+
+void
+event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
+{
+ cb->evcb_pri = priority;
+}
+
+void
+event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
+{
+ if (!base)
+ base = current_base;
+ event_callback_cancel_(base, cb);
+}
+
+#define MAX_DEFERREDS_QUEUED 32
+int
+event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
+{
+ int r = 1;
+ if (!base)
+ base = current_base;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
+ r = event_callback_activate_later_nolock_(base, cb);
+ } else {
+ r = event_callback_activate_nolock_(base, cb);
+ if (r) {
+ ++base->n_deferreds_queued;
+ }
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+static int
+timeout_next(struct event_base *base, struct timeval **tv_p)
+{
+ /* Caller must hold th_base_lock */
+ struct timeval now;
+ struct event *ev;
+ struct timeval *tv = *tv_p;
+ int res = 0;
+
+ ev = min_heap_top_(&base->timeheap);
+
+ if (ev == NULL) {
+ /* if no time-based events are active wait for I/O */
+ *tv_p = NULL;
+ goto out;
+ }
+
+ if (gettime(base, &now) == -1) {
+ res = -1;
+ goto out;
+ }
+
+ if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
+ evutil_timerclear(tv);
+ goto out;
+ }
+
+ evutil_timersub(&ev->ev_timeout, &now, tv);
+
+ EVUTIL_ASSERT(tv->tv_sec >= 0);
+ EVUTIL_ASSERT(tv->tv_usec >= 0);
+ event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
+
+out:
+ return (res);
+}
+
+/* Activate every event whose timeout has elapsed. */
+static void
+timeout_process(struct event_base *base)
+{
+ /* Caller must hold lock. */
+ struct timeval now;
+ struct event *ev;
+
+ if (min_heap_empty_(&base->timeheap)) {
+ return;
+ }
+
+ gettime(base, &now);
+
+ while ((ev = min_heap_top_(&base->timeheap))) {
+ if (evutil_timercmp(&ev->ev_timeout, &now, >))
+ break;
+
+ /* delete this event from the I/O queues */
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+
+ event_debug(("timeout_process: event: %p, call %p",
+ ev, ev->ev_callback));
+ event_active_nolock_(ev, EV_TIMEOUT, 1);
+ }
+}
+
+#if (EVLIST_INTERNAL >> 4) != 1
+#error "Mismatch for value of EVLIST_INTERNAL"
+#endif
+
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
+
+/* These are a fancy way to spell
+ if (flags & EVLIST_INTERNAL)
+ base->event_count--/++;
+*/
+#define DECR_EVENT_COUNT(base,flags) \
+ ((base)->event_count -= (~((flags) >> 4) & 1))
+#define INCR_EVENT_COUNT(base,flags) do { \
+ ((base)->event_count += (~((flags) >> 4) & 1)); \
+ MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
+} while (0)
+
+static void
+event_queue_remove_inserted(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
+ return;
+ }
+ DECR_EVENT_COUNT(base, ev->ev_flags);
+ ev->ev_flags &= ~EVLIST_INSERTED;
+}
+static void
+event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
+ event_errx(1, "%s: %p not on queue %x", __func__,
+ evcb, EVLIST_ACTIVE);
+ return;
+ }
+ DECR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags &= ~EVLIST_ACTIVE;
+ base->event_count_active--;
+
+ TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
+ evcb, evcb_active_next);
+}
+static void
+event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
+ event_errx(1, "%s: %p not on queue %x", __func__,
+ evcb, EVLIST_ACTIVE_LATER);
+ return;
+ }
+ DECR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
+ base->event_count_active--;
+
+ TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+}
+static void
+event_queue_remove_timeout(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
+ return;
+ }
+ DECR_EVENT_COUNT(base, ev->ev_flags);
+ ev->ev_flags &= ~EVLIST_TIMEOUT;
+
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ } else {
+ min_heap_erase_(&base->timeheap, ev);
+ }
+}
+
+#ifdef USE_REINSERT_TIMEOUT
+/* Remove and reinsert 'ev' into the timeout queue. */
+static void
+event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
+ int was_common, int is_common, int old_timeout_idx)
+{
+ struct common_timeout_list *ctl;
+ if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
+ event_queue_insert_timeout(base, ev);
+ return;
+ }
+
+ switch ((was_common<<1) | is_common) {
+ case 3: /* Changing from one common timeout to another */
+ ctl = base->common_timeout_queues[old_timeout_idx];
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ ctl = get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ break;
+ case 2: /* Was common; is no longer common */
+ ctl = base->common_timeout_queues[old_timeout_idx];
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ min_heap_push_(&base->timeheap, ev);
+ break;
+ case 1: /* Wasn't common; has become common. */
+ min_heap_erase_(&base->timeheap, ev);
+ ctl = get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ break;
+ case 0: /* was in heap; is still on heap. */
+ min_heap_adjust_(&base->timeheap, ev);
+ break;
+ default:
+ EVUTIL_ASSERT(0); /* unreachable */
+ break;
+ }
+}
+#endif
+
+/* Add 'ev' to the common timeout list 'ctl'. */
+static void
+insert_common_timeout_inorder(struct common_timeout_list *ctl,
+ struct event *ev)
+{
+ struct event *e;
+ /* By all logic, we should just be able to append 'ev' to the end of
+ * ctl->events, since the timeout on each 'ev' is set to {the common
+ * timeout} + {the time when we add the event}, and so the events
+	 * should arrive in order of their timeouts. But just in case
+	 * there's some wacky threading issue going on, we do a search from
+	 * the end of the list to find the right insertion point.
+ */
+ TAILQ_FOREACH_REVERSE(e, &ctl->events,
+ event_list, ev_timeout_pos.ev_next_with_common_timeout) {
+ /* This timercmp is a little sneaky, since both ev and e have
+ * magic values in tv_usec. Fortunately, they ought to have
+ * the _same_ magic values in tv_usec. Let's assert for that.
+ */
+ EVUTIL_ASSERT(
+ is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
+ if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
+ TAILQ_INSERT_AFTER(&ctl->events, e, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ return;
+ }
+ }
+ TAILQ_INSERT_HEAD(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+}
+
+static void
+event_queue_insert_inserted(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd));
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, ev->ev_flags);
+
+ ev->ev_flags |= EVLIST_INSERTED;
+}
+
+static void
+event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (evcb->evcb_flags & EVLIST_ACTIVE) {
+ /* Double insertion is possible for active events */
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, evcb->evcb_flags);
+
+ evcb->evcb_flags |= EVLIST_ACTIVE;
+
+ base->event_count_active++;
+ MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
+ evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
+ /* Double insertion is possible */
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
+ base->event_count_active++;
+ MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_timeout(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd));
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, ev->ev_flags);
+
+ ev->ev_flags |= EVLIST_TIMEOUT;
+
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ } else {
+ min_heap_push_(&base->timeheap, ev);
+ }
+}
+
+static void
+event_queue_make_later_events_active(struct event_base *base)
+{
+ struct event_callback *evcb;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+ TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+ evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
+ base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
+ }
+}
+
+/* Functions for debugging */
+
+const char *
+event_get_version(void)
+{
+ return (EVENT__VERSION);
+}
+
+ev_uint32_t
+event_get_version_number(void)
+{
+ return (EVENT__NUMERIC_VERSION);
+}
+
+/*
+ * No thread-safe interface needed - the information should be the same
+ * for all threads.
+ */
+
+const char *
+event_get_method(void)
+{
+ return (current_base->evsel->name);
+}
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *(*mm_malloc_fn_)(size_t sz) = NULL;
+static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
+static void (*mm_free_fn_)(void *p) = NULL;
+
+void *
+event_mm_malloc_(size_t sz)
+{
+ if (sz == 0)
+ return NULL;
+
+ if (mm_malloc_fn_)
+ return mm_malloc_fn_(sz);
+ else
+ return malloc(sz);
+}
+
+void *
+event_mm_calloc_(size_t count, size_t size)
+{
+ if (count == 0 || size == 0)
+ return NULL;
+
+ if (mm_malloc_fn_) {
+ size_t sz = count * size;
+ void *p = NULL;
+ if (count > EV_SIZE_MAX / size)
+ goto error;
+ p = mm_malloc_fn_(sz);
+ if (p)
+ return memset(p, 0, sz);
+ } else {
+ void *p = calloc(count, size);
+#ifdef _WIN32
+ /* Windows calloc doesn't reliably set ENOMEM */
+ if (p == NULL)
+ goto error;
+#endif
+ return p;
+ }
+
+error:
+ errno = ENOMEM;
+ return NULL;
+}
+
+char *
+event_mm_strdup_(const char *str)
+{
+ if (!str) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (mm_malloc_fn_) {
+ size_t ln = strlen(str);
+ void *p = NULL;
+ if (ln == EV_SIZE_MAX)
+ goto error;
+ p = mm_malloc_fn_(ln+1);
+ if (p)
+ return memcpy(p, str, ln+1);
+ } else
+#ifdef _WIN32
+ return _strdup(str);
+#else
+ return strdup(str);
+#endif
+
+error:
+ errno = ENOMEM;
+ return NULL;
+}
+
+void *
+event_mm_realloc_(void *ptr, size_t sz)
+{
+ if (mm_realloc_fn_)
+ return mm_realloc_fn_(ptr, sz);
+ else
+ return realloc(ptr, sz);
+}
+
+void
+event_mm_free_(void *ptr)
+{
+ if (mm_free_fn_)
+ mm_free_fn_(ptr);
+ else
+ free(ptr);
+}
+
+void
+event_set_mem_functions(void *(*malloc_fn)(size_t sz),
+ void *(*realloc_fn)(void *ptr, size_t sz),
+ void (*free_fn)(void *ptr))
+{
+ mm_malloc_fn_ = malloc_fn;
+ mm_realloc_fn_ = realloc_fn;
+ mm_free_fn_ = free_fn;
+}
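+
+/* Usage sketch: replacement allocators should be installed before any other
+ * libevent call that might allocate; my_malloc, my_realloc and my_free are
+ * hypothetical functions with malloc/realloc/free signatures:
+ *
+ *     event_set_mem_functions(my_malloc, my_realloc, my_free);
+ */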
+#endif
+
+#ifdef EVENT__HAVE_EVENTFD
+static void
+evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
+{
+ ev_uint64_t msg;
+ ev_ssize_t r;
+ struct event_base *base = arg;
+
+ r = read(fd, (void*) &msg, sizeof(msg));
+ if (r<0 && errno != EAGAIN) {
+ event_sock_warn(fd, "Error reading from eventfd");
+ }
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->is_notify_pending = 0;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+#endif
+
+static void
+evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
+{
+ unsigned char buf[1024];
+ struct event_base *base = arg;
+#ifdef _WIN32
+ while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
+ ;
+#else
+ while (read(fd, (char*)buf, sizeof(buf)) > 0)
+ ;
+#endif
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->is_notify_pending = 0;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+int
+evthread_make_base_notifiable(struct event_base *base)
+{
+ int r;
+ if (!base)
+ return -1;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = evthread_make_base_notifiable_nolock_(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+static int
+evthread_make_base_notifiable_nolock_(struct event_base *base)
+{
+ void (*cb)(evutil_socket_t, short, void *);
+ int (*notify)(struct event_base *);
+
+ if (base->th_notify_fn != NULL) {
+ /* The base is already notifiable: we're doing fine. */
+ return 0;
+ }
+
+#if defined(EVENT__HAVE_WORKING_KQUEUE)
+ if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
+ base->th_notify_fn = event_kq_notify_base_;
+ /* No need to add an event here; the backend can wake
+ * itself up just fine. */
+ return 0;
+ }
+#endif
+
+#ifdef EVENT__HAVE_EVENTFD
+ base->th_notify_fd[0] = evutil_eventfd_(0,
+ EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
+ if (base->th_notify_fd[0] >= 0) {
+ base->th_notify_fd[1] = -1;
+ notify = evthread_notify_base_eventfd;
+ cb = evthread_notify_drain_eventfd;
+ } else
+#endif
+ if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
+ notify = evthread_notify_base_default;
+ cb = evthread_notify_drain_default;
+ } else {
+ return -1;
+ }
+
+ base->th_notify_fn = notify;
+
+ /* prepare an event that we can use for wakeup */
+ event_assign(&base->th_notify, base, base->th_notify_fd[0],
+ EV_READ|EV_PERSIST, cb, base);
+
+	/* we need to mark this as an internal event */
+ base->th_notify.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&base->th_notify, 0);
+
+ return event_add_nolock_(&base->th_notify, NULL, 0);
+}
+
+int
+event_base_foreach_event_nolock_(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ int r, i;
+ unsigned u;
+ struct event *ev;
+
+ /* Start out with all the EVLIST_INSERTED events. */
+ if ((r = evmap_foreach_event_(base, fn, arg)))
+ return r;
+
+ /* Okay, now we deal with those events that have timeouts and are in
+ * the min-heap. */
+ for (u = 0; u < base->timeheap.n; ++u) {
+ ev = base->timeheap.p[u];
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ /* we already processed this one */
+ continue;
+ }
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+
+	/* Now for the events in one of the common-timeout queues,
+	 * rather than the min-heap. */
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ TAILQ_FOREACH(ev, &ctl->events,
+ ev_timeout_pos.ev_next_with_common_timeout) {
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ /* we already processed this one */
+ continue;
+ }
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+ }
+
+	/* Finally, we deal with all the active events that we haven't touched
+ * yet. */
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb;
+ TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+ if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
+				/* This isn't an event (EVLIST_INIT clear), or
+				 * we already processed it (EVLIST_INSERTED or
+				 * EVLIST_TIMEOUT set). */
+ continue;
+ }
+ ev = event_callback_to_event(evcb);
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the inserted events. */
+static int
+dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+ FILE *output = arg;
+ const char *gloss = (e->ev_events & EV_SIGNAL) ?
+ "sig" : "fd ";
+
+ if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
+ return 0;
+
+ fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
+ (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
+ (e->ev_events&EV_READ)?" Read":"",
+ (e->ev_events&EV_WRITE)?" Write":"",
+ (e->ev_events&EV_CLOSED)?" EOF":"",
+ (e->ev_events&EV_SIGNAL)?" Signal":"",
+ (e->ev_events&EV_PERSIST)?" Persist":"",
+ (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
+ if (e->ev_flags & EVLIST_TIMEOUT) {
+ struct timeval tv;
+ tv.tv_sec = e->ev_timeout.tv_sec;
+ tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
+ evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
+ fprintf(output, " Timeout=%ld.%06d",
+ (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
+ }
+ fputc('\n', output);
+
+ return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the active events. */
+static int
+dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+ FILE *output = arg;
+ const char *gloss = (e->ev_events & EV_SIGNAL) ?
+ "sig" : "fd ";
+
+ if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
+ return 0;
+
+ fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
+ (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
+ (e->ev_res&EV_READ)?" Read":"",
+ (e->ev_res&EV_WRITE)?" Write":"",
+ (e->ev_res&EV_CLOSED)?" EOF":"",
+ (e->ev_res&EV_SIGNAL)?" Signal":"",
+ (e->ev_res&EV_TIMEOUT)?" Timeout":"",
+ (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
+ (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
+
+ return 0;
+}
+
+int
+event_base_foreach_event(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ int r;
+ if ((!fn) || (!base)) {
+ return -1;
+ }
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_base_foreach_event_nolock_(base, fn, arg);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
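+
+/* Usage sketch: the callback is invoked once per event, and iteration stops
+ * early if it returns nonzero; count_cb is hypothetical:
+ *
+ *     static int count_cb(const struct event_base *b, const struct event *e,
+ *         void *arg)
+ *     {
+ *         (*(int *)arg)++;
+ *         return 0;
+ *     }
+ *     int n = 0;
+ *     event_base_foreach_event(base, count_cb, &n);
+ */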
+
+
+void
+event_base_dump_events(struct event_base *base, FILE *output)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ fprintf(output, "Inserted events:\n");
+ event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
+
+ fprintf(output, "Active events:\n");
+ event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
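+
+/* Usage sketch: dump the current event lists to a stdio stream, e.g.
+ *
+ *     event_base_dump_events(base, stderr);
+ */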
+
+void
+event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_active_by_signal(struct event_base *base, int sig)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ evmap_signal_active_(base, sig, 1);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+
+void
+event_base_add_virtual_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->virtual_event_count++;
+ MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_del_virtual_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ EVUTIL_ASSERT(base->virtual_event_count > 0);
+ base->virtual_event_count--;
+ if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+static void
+event_free_debug_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_map_lock_ != NULL) {
+ EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
+ event_debug_map_lock_ = NULL;
+ evthreadimpl_disable_lock_debugging_();
+ }
+#endif /* EVENT__DISABLE_DEBUG_MODE */
+#endif /* EVENT__DISABLE_THREAD_SUPPORT */
+ return;
+}
+
+static void
+event_free_debug_globals(void)
+{
+ event_free_debug_globals_locks();
+}
+
+static void
+event_free_evsig_globals(void)
+{
+ evsig_free_globals_();
+}
+
+static void
+event_free_evutil_globals(void)
+{
+ evutil_free_globals_();
+}
+
+static void
+event_free_globals(void)
+{
+ event_free_debug_globals();
+ event_free_evsig_globals();
+ event_free_evutil_globals();
+}
+
+void
+libevent_global_shutdown(void)
+{
+ event_disable_debug_mode();
+ event_free_globals();
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+event_global_setup_locks_(const int enable_locks)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
+#endif
+ if (evsig_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ if (evutil_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ return 0;
+}
+#endif
+
+void
+event_base_assert_ok_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_base_assert_ok_nolock_(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_assert_ok_nolock_(struct event_base *base)
+{
+ int i;
+ int count;
+
+ /* First do checks on the per-fd and per-signal lists */
+ evmap_check_integrity_(base);
+
+ /* Check the heap property */
+ for (i = 1; i < (int)base->timeheap.n; ++i) {
+ int parent = (i - 1) / 2;
+ struct event *ev, *p_ev;
+ ev = base->timeheap.p[i];
+ p_ev = base->timeheap.p[parent];
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
+ EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
+ EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
+ }
+
+ /* Check that the common timeouts are fine */
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl = base->common_timeout_queues[i];
+ struct event *last=NULL, *ev;
+
+ EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
+
+ TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
+ if (last)
+ EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
+ EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
+ EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
+ last = ev;
+ }
+ }
+
+ /* Check the active queues. */
+ count = 0;
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb;
+ EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
+ TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+ EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
+ EVUTIL_ASSERT(evcb->evcb_pri == i);
+ ++count;
+ }
+ }
+
+ {
+ struct event_callback *evcb;
+ TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
+ EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
+ ++count;
+ }
+ }
+ EVUTIL_ASSERT(count == base->event_count_active);
+}
diff --git a/libs/libevent/src/event_iocp.c b/libs/libevent/src/event_iocp.c
new file mode 100644
index 0000000000..a9902fbc42
--- /dev/null
+++ b/libs/libevent/src/event_iocp.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "evconfig-private.h"
+
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#include <windows.h>
+#include <process.h>
+#include <stdio.h>
+#include <mswsock.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "iocp-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "event-internal.h"
+#include "evthread-internal.h"
+
+#define NOTIFICATION_KEY ((ULONG_PTR)-1)
+
+void
+event_overlapped_init_(struct event_overlapped *o, iocp_callback cb)
+{
+ memset(o, 0, sizeof(struct event_overlapped));
+ o->cb = cb;
+}
+
+static void
+handle_entry(OVERLAPPED *o, ULONG_PTR completion_key, DWORD nBytes, int ok)
+{
+ struct event_overlapped *eo =
+ EVUTIL_UPCAST(o, struct event_overlapped, overlapped);
+ eo->cb(eo, completion_key, nBytes, ok);
+}
+
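+/* Worker-thread body: block on GetQueuedCompletionStatus() and hand each
+ * completion to its event_overlapped callback via handle_entry(), until
+ * shutdown is requested or GetQueuedCompletionStatus() returns without a
+ * completion; the last thread to exit releases the shutdown semaphore. */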
+static void
+loop(void *port_)
+{
+ struct event_iocp_port *port = port_;
+ long ms = port->ms;
+ HANDLE p = port->port;
+
+ if (ms <= 0)
+ ms = INFINITE;
+
+ while (1) {
+ OVERLAPPED *overlapped=NULL;
+ ULONG_PTR key=0;
+ DWORD bytes=0;
+ int ok = GetQueuedCompletionStatus(p, &bytes, &key,
+ &overlapped, ms);
+ EnterCriticalSection(&port->lock);
+ if (port->shutdown) {
+ if (--port->n_live_threads == 0)
+ ReleaseSemaphore(port->shutdownSemaphore, 1,
+ NULL);
+ LeaveCriticalSection(&port->lock);
+ return;
+ }
+ LeaveCriticalSection(&port->lock);
+
+ if (key != NOTIFICATION_KEY && overlapped)
+ handle_entry(overlapped, key, bytes, ok);
+ else if (!overlapped)
+ break;
+ }
+ event_warnx("GetQueuedCompletionStatus exited with no event.");
+ EnterCriticalSection(&port->lock);
+ if (--port->n_live_threads == 0)
+ ReleaseSemaphore(port->shutdownSemaphore, 1, NULL);
+ LeaveCriticalSection(&port->lock);
+}
+
+int
+event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd,
+ ev_uintptr_t key)
+{
+ HANDLE h;
+ h = CreateIoCompletionPort((HANDLE)fd, port->port, key, port->n_threads);
+ if (!h)
+ return -1;
+ return 0;
+}
+
+static void *
+get_extension_function(SOCKET s, const GUID *which_fn)
+{
+ void *ptr = NULL;
+ DWORD bytes=0;
+ WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (GUID*)which_fn, sizeof(*which_fn),
+ &ptr, sizeof(ptr),
+ &bytes, NULL, NULL);
+
+ /* No need to detect errors here: if ptr is set, then we have a good
+ function pointer. Otherwise, we should behave as if we had no
+ function pointer.
+ */
+ return ptr;
+}
+
+/* Mingw doesn't have these in its mswsock.h. The values are copied from
+ wine.h. Perhaps if we copy them exactly, the cargo will come again.
+*/
+#ifndef WSAID_ACCEPTEX
+#define WSAID_ACCEPTEX \
+ {0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+#ifndef WSAID_CONNECTEX
+#define WSAID_CONNECTEX \
+ {0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
+#endif
+#ifndef WSAID_GETACCEPTEXSOCKADDRS
+#define WSAID_GETACCEPTEXSOCKADDRS \
+ {0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+
+static int extension_fns_initialized = 0;
+
+static void
+init_extension_functions(struct win32_extension_fns *ext)
+{
+ const GUID acceptex = WSAID_ACCEPTEX;
+ const GUID connectex = WSAID_CONNECTEX;
+ const GUID getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
+ SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
+ if (s == INVALID_SOCKET)
+ return;
+ ext->AcceptEx = get_extension_function(s, &acceptex);
+ ext->ConnectEx = get_extension_function(s, &connectex);
+ ext->GetAcceptExSockaddrs = get_extension_function(s,
+ &getacceptexsockaddrs);
+ closesocket(s);
+
+ extension_fns_initialized = 1;
+}
+
+static struct win32_extension_fns the_extension_fns;
+
+const struct win32_extension_fns *
+event_get_win32_extension_fns_(void)
+{
+ return &the_extension_fns;
+}
+
+#define N_CPUS_DEFAULT 2
+
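+/* Create an I/O completion port and start n_cpus * 2 worker threads running
+ * loop() above; on failure, allocated resources are released and NULL is
+ * returned. */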
+struct event_iocp_port *
+event_iocp_port_launch_(int n_cpus)
+{
+ struct event_iocp_port *port;
+ int i;
+
+ if (!extension_fns_initialized)
+ init_extension_functions(&the_extension_fns);
+
+ if (!(port = mm_calloc(1, sizeof(struct event_iocp_port))))
+ return NULL;
+
+ if (n_cpus <= 0)
+ n_cpus = N_CPUS_DEFAULT;
+ port->n_threads = n_cpus * 2;
+ port->threads = mm_calloc(port->n_threads, sizeof(HANDLE));
+ if (!port->threads)
+ goto err;
+
+ port->port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
+ n_cpus);
+ port->ms = -1;
+ if (!port->port)
+ goto err;
+
+ port->shutdownSemaphore = CreateSemaphore(NULL, 0, 1, NULL);
+ if (!port->shutdownSemaphore)
+ goto err;
+
+ for (i=0; i<port->n_threads; ++i) {
+ ev_uintptr_t th = _beginthread(loop, 0, port);
+ if (th == (ev_uintptr_t)-1)
+ goto err;
+ port->threads[i] = (HANDLE)th;
+ ++port->n_live_threads;
+ }
+
+ InitializeCriticalSectionAndSpinCount(&port->lock, 1000);
+
+ return port;
+err:
+ if (port->port)
+ CloseHandle(port->port);
+ if (port->threads)
+ mm_free(port->threads);
+ if (port->shutdownSemaphore)
+ CloseHandle(port->shutdownSemaphore);
+ mm_free(port);
+ return NULL;
+}
+
+static void
+event_iocp_port_unlock_and_free_(struct event_iocp_port *port)
+{
+ DeleteCriticalSection(&port->lock);
+ CloseHandle(port->port);
+ CloseHandle(port->shutdownSemaphore);
+ mm_free(port->threads);
+ mm_free(port);
+}
+
+static int
+event_iocp_notify_all(struct event_iocp_port *port)
+{
+ int i, r, ok=1;
+ for (i=0; i<port->n_threads; ++i) {
+ r = PostQueuedCompletionStatus(port->port, 0, NOTIFICATION_KEY,
+ NULL);
+ if (!r)
+ ok = 0;
+ }
+ return ok ? 0 : -1;
+}
+
+int
+event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec)
+{
+ DWORD ms = INFINITE;
+ int n;
+
+ EnterCriticalSection(&port->lock);
+ port->shutdown = 1;
+ LeaveCriticalSection(&port->lock);
+ event_iocp_notify_all(port);
+
+ if (waitMsec >= 0)
+ ms = waitMsec;
+
+ WaitForSingleObject(port->shutdownSemaphore, ms);
+ EnterCriticalSection(&port->lock);
+ n = port->n_live_threads;
+ LeaveCriticalSection(&port->lock);
+ if (n == 0) {
+ event_iocp_port_unlock_and_free_(port);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+event_iocp_activate_overlapped_(
+ struct event_iocp_port *port, struct event_overlapped *o,
+ ev_uintptr_t key, ev_uint32_t n)
+{
+ BOOL r;
+
+ r = PostQueuedCompletionStatus(port->port, n, key, &o->overlapped);
+ return (r==0) ? -1 : 0;
+}
+
+struct event_iocp_port *
+event_base_get_iocp_(struct event_base *base)
+{
+#ifdef _WIN32
+ return base->iocp;
+#else
+ return NULL;
+#endif
+}
diff --git a/libs/libevent/src/event_tagging.c b/libs/libevent/src/event_tagging.c
new file mode 100644
index 0000000000..6459dfa72e
--- /dev/null
+++ b/libs/libevent/src/event_tagging.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (c) 2003-2009 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#ifdef EVENT__HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/tag.h"
+#include "event2/buffer.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+
+/*
+ Here's our wire format:
+
+ Stream = TaggedData*
+
+ TaggedData = Tag Length Data
+ where the integer value of 'Length' is the length of 'data'.
+
+ Tag = HByte* LByte
+ where each HByte is a byte with the high bit set, and LByte is a byte
+ with the high bit clear. The integer value of the tag is taken
+ by concatenating the lower 7 bits of all the bytes, least-significant
+ group first. So for example, the tag 0x66 is encoded as [66], whereas
+ the tag 0x166 is encoded as [e6 02]
+
+ Length = Integer
+
+ Integer = NNibbles Nibble* Padding?
+ where NNibbles is a 4-bit value encoding the number of nibbles-1,
+ and each Nibble is 4 bits worth of the encoded integer, stored
+ least-significant nibble first. If the total encoded integer size is
+ an odd number of nibbles,
+ a final padding nibble with value 0 is appended.
+*/
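+
+/*
+ * A short worked example (illustrative only; the byte values follow from
+ * the encoders below): evtag_marshal(buf, 0x20, "hi", 2) appends
+ *
+ *   20       tag 0x20 (fits in a single 7-bit group, so one byte)
+ *   02       length 2 (high nibble 0 = nibble count minus one, low nibble 2)
+ *   68 69    the payload "hi"
+ */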
+
+int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
+int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
+
+void
+evtag_init(void)
+{
+}
+
+/*
+ * We encode integers by nibbles; the first nibble contains the number
+ * of significant nibbles - 1; this allows us to encode up to 64-bit
+ * integers. This function is byte-order independent.
+ *
+ * @param number a 32-bit unsigned integer to encode
+ * @param data a pointer to where the data should be written. Must
+ * have at least 5 bytes free.
+ * @return the number of bytes written into data.
+ */
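+/*
+ * For example (illustrative, traced from the macro below): the value 255
+ * (0xff) encodes to the two bytes [1f f0]; the leading nibble 0x1 encodes
+ * the nibble count minus one (so two value nibbles follow), the nibbles
+ * f and f carry the value, and the trailing 0 is the padding nibble that
+ * rounds the total up to a whole byte.
+ */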
+
+#define ENCODE_INT_INTERNAL(data, number) do { \
+ int off = 1, nibbles = 0; \
+ \
+ memset(data, 0, sizeof(number)+1); \
+ while (number) { \
+ if (off & 0x1) \
+ data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f); \
+ else \
+ data[off/2] = (data[off/2] & 0x0f) | \
+ ((number & 0x0f) << 4); \
+ number >>= 4; \
+ off++; \
+ } \
+ \
+ if (off > 2) \
+ nibbles = off - 2; \
+ \
+ /* Off - 1 is the number of encoded nibbles */ \
+ data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4); \
+ \
+ return ((off + 1) / 2); \
+} while (0)
+
+static inline int
+encode_int_internal(ev_uint8_t *data, ev_uint32_t number)
+{
+ ENCODE_INT_INTERNAL(data, number);
+}
+
+static inline int
+encode_int64_internal(ev_uint8_t *data, ev_uint64_t number)
+{
+ ENCODE_INT_INTERNAL(data, number);
+}
+
+void
+evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number)
+{
+ ev_uint8_t data[5];
+ int len = encode_int_internal(data, number);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number)
+{
+ ev_uint8_t data[9];
+ int len = encode_int64_internal(data, number);
+ evbuffer_add(evbuf, data, len);
+}
+
+/*
+ * Support variable length encoding of tags; we use the high bit in each
+ * octet as a continuation signal.
+ */
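+/*
+ * For instance (illustrative): tag 0x7f still fits in one byte, [7f], while
+ * tag 0x80 needs a continuation byte and becomes [80 01]: the low 7 bits
+ * come first, and the high bit is set on every byte except the last.
+ */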
+
+int
+evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
+{
+ int bytes = 0;
+ ev_uint8_t data[5];
+
+ memset(data, 0, sizeof(data));
+ do {
+ ev_uint8_t lower = tag & 0x7f;
+ tag >>= 7;
+
+ if (tag)
+ lower |= 0x80;
+
+ data[bytes++] = lower;
+ } while (tag);
+
+ if (evbuf != NULL)
+ evbuffer_add(evbuf, data, bytes);
+
+ return (bytes);
+}
+
+static int
+decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
+{
+ ev_uint32_t number = 0;
+ size_t len = evbuffer_get_length(evbuf);
+ ev_uint8_t *data;
+ size_t count = 0;
+ int shift = 0, done = 0;
+
+ /*
+ * the encoding of a number is at most one byte more than its
+ * storage size. however, it may also be much smaller.
+ */
+ data = evbuffer_pullup(
+ evbuf, len < sizeof(number) + 1 ? len : sizeof(number) + 1);
+ if (!data)
+ return (-1);
+
+ while (count++ < len) {
+ ev_uint8_t lower = *data++;
+ if (shift >= 28) {
+ /* Make sure it fits into 32 bits */
+ if (shift > 28)
+ return (-1);
+ if ((lower & 0x7f) > 15)
+ return (-1);
+ }
+ number |= (lower & (unsigned)0x7f) << shift;
+ shift += 7;
+
+ if (!(lower & 0x80)) {
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done)
+ return (-1);
+
+ if (dodrain)
+ evbuffer_drain(evbuf, count);
+
+ if (ptag != NULL)
+ *ptag = number;
+
+ return count > INT_MAX ? INT_MAX : (int)(count);
+}
+
+int
+evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
+{
+ return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
+}
+
+/*
+ * Marshal a data type, the general format is as follows:
+ *
+ * tag number: var bytes; length: var bytes; payload: var bytes
+ */
+
+void
+evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
+ const void *data, ev_uint32_t len)
+{
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, (void *)data, len);
+}
+
+void
+evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
+ struct evbuffer *data)
+{
+ evtag_encode_tag(evbuf, tag);
+ /* XXX support more than UINT32_MAX data */
+ evtag_encode_int(evbuf, (ev_uint32_t)evbuffer_get_length(data));
+ evbuffer_add_buffer(evbuf, data);
+}
+
+/* Marshaling for integers */
+void
+evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
+{
+ ev_uint8_t data[5];
+ int len = encode_int_internal(data, integer);
+
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
+ ev_uint64_t integer)
+{
+ ev_uint8_t data[9];
+ int len = encode_int64_internal(data, integer);
+
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
+{
+ /* TODO support strings longer than UINT32_MAX ? */
+ evtag_marshal(buf, tag, string, (ev_uint32_t)strlen(string));
+}
+
+void
+evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
+{
+ ev_uint8_t data[10];
+ int len = encode_int_internal(data, tv->tv_sec);
+ len += encode_int_internal(data + len, tv->tv_usec);
+ evtag_marshal(evbuf, tag, data, len);
+}
+
+#define DECODE_INT_INTERNAL(number, maxnibbles, pnumber, evbuf, offset) \
+do { \
+ ev_uint8_t *data; \
+ ev_ssize_t len = evbuffer_get_length(evbuf) - offset; \
+ int nibbles = 0; \
+ \
+ if (len <= 0) \
+ return (-1); \
+ \
+ /* XXX(niels): faster? */ \
+ data = evbuffer_pullup(evbuf, offset + 1) + offset; \
+ if (!data) \
+ return (-1); \
+ \
+ nibbles = ((data[0] & 0xf0) >> 4) + 1; \
+ if (nibbles > maxnibbles || (nibbles >> 1) + 1 > len) \
+ return (-1); \
+ len = (nibbles >> 1) + 1; \
+ \
+ data = evbuffer_pullup(evbuf, offset + len) + offset; \
+ if (!data) \
+ return (-1); \
+ \
+ while (nibbles > 0) { \
+ number <<= 4; \
+ if (nibbles & 0x1) \
+ number |= data[nibbles >> 1] & 0x0f; \
+ else \
+ number |= (data[nibbles >> 1] & 0xf0) >> 4; \
+ nibbles--; \
+ } \
+ \
+ *pnumber = number; \
+ \
+ return (int)(len); \
+} while (0)
+
+/* Internal: decode an integer from an evbuffer, without draining it.
+ * Only integers up to 32-bits are supported.
+ *
+ * @param evbuf the buffer to read from
+ * @param offset an index into the buffer at which we should start reading.
+ * @param pnumber a pointer to receive the integer.
+ * @return The length of the number as encoded, or -1 on error.
+ */
+
+static int
+decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int offset)
+{
+ ev_uint32_t number = 0;
+ DECODE_INT_INTERNAL(number, 8, pnumber, evbuf, offset);
+}
+
+static int
+decode_int64_internal(ev_uint64_t *pnumber, struct evbuffer *evbuf, int offset)
+{
+ ev_uint64_t number = 0;
+ DECODE_INT_INTERNAL(number, 16, pnumber, evbuf, offset);
+}
+
+int
+evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
+{
+ int res = decode_int_internal(pnumber, evbuf, 0);
+ if (res != -1)
+ evbuffer_drain(evbuf, res);
+
+ return (res == -1 ? -1 : 0);
+}
+
+int
+evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf)
+{
+ int res = decode_int64_internal(pnumber, evbuf, 0);
+ if (res != -1)
+ evbuffer_drain(evbuf, res);
+
+ return (res == -1 ? -1 : 0);
+}
+
+int
+evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
+{
+ return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
+}
+
+int
+evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+ int res, len;
+
+ len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+ if (len == -1)
+ return (-1);
+
+ res = decode_int_internal(plength, evbuf, len);
+ if (res == -1)
+ return (-1);
+
+ *plength += res + len;
+
+ return (0);
+}
+
+int
+evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+ int res, len;
+
+ len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+ if (len == -1)
+ return (-1);
+
+ res = decode_int_internal(plength, evbuf, len);
+ if (res == -1)
+ return (-1);
+
+ return (0);
+}
+
+/* just unmarshals the header and returns the length of the remaining data */
+
+int
+evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag)
+{
+ ev_uint32_t len;
+
+ if (decode_tag_internal(ptag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ return (len);
+}
+
+int
+evtag_consume(struct evbuffer *evbuf)
+{
+ int len;
+ if ((len = evtag_unmarshal_header(evbuf, NULL)) == -1)
+ return (-1);
+ evbuffer_drain(evbuf, len);
+
+ return (0);
+}
+
+/* Reads the data type from an event buffer */
+
+int
+evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
+{
+ int len;
+
+ if ((len = evtag_unmarshal_header(src, ptag)) == -1)
+ return (-1);
+
+ if (evbuffer_add(dst, evbuffer_pullup(src, len), len) == -1)
+ return (-1);
+
+ evbuffer_drain(src, len);
+
+ return (len);
+}
+
+/* Marshaling for integers */
+
+int
+evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint32_t *pinteger)
+{
+ ev_uint32_t tag;
+ ev_uint32_t len;
+ int result;
+
+ if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (need_tag != tag)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ result = decode_int_internal(pinteger, evbuf, 0);
+ evbuffer_drain(evbuf, len);
+ if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
+ return (-1);
+ else
+ return result;
+}
+
+int
+evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint64_t *pinteger)
+{
+ ev_uint32_t tag;
+ ev_uint32_t len;
+ int result;
+
+ if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (need_tag != tag)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ result = decode_int64_internal(pinteger, evbuf, 0);
+ evbuffer_drain(evbuf, len);
+ if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
+ return (-1);
+ else
+ return result;
+}
+
+/* Unmarshal a fixed length tag */
+
+int
+evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
+ size_t len)
+{
+ ev_uint32_t tag;
+ int tag_len;
+
+ /* Now unmarshal a tag and check that it matches the tag we want */
+ if ((tag_len = evtag_unmarshal_header(src, &tag)) < 0 ||
+ tag != need_tag)
+ return (-1);
+
+ if ((size_t)tag_len != len)
+ return (-1);
+
+ evbuffer_remove(src, data, len);
+ return (0);
+}
+
+int
+evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ char **pstring)
+{
+ ev_uint32_t tag;
+ int tag_len;
+
+ if ((tag_len = evtag_unmarshal_header(evbuf, &tag)) == -1 ||
+ tag != need_tag)
+ return (-1);
+
+ *pstring = mm_malloc(tag_len + 1);
+ if (*pstring == NULL) {
+ event_warn("%s: malloc", __func__);
+ return -1;
+ }
+ evbuffer_remove(evbuf, *pstring, tag_len);
+ (*pstring)[tag_len] = '\0';
+
+ return (0);
+}
+
+int
+evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ struct timeval *ptv)
+{
+ ev_uint32_t tag;
+ ev_uint32_t integer;
+ int len, offset, offset2;
+ int result = -1;
+
+ if ((len = evtag_unmarshal_header(evbuf, &tag)) == -1)
+ return (-1);
+ if (tag != need_tag)
+ goto done;
+ if ((offset = decode_int_internal(&integer, evbuf, 0)) == -1)
+ goto done;
+ ptv->tv_sec = integer;
+ if ((offset2 = decode_int_internal(&integer, evbuf, offset)) == -1)
+ goto done;
+ ptv->tv_usec = integer;
+ if (offset + offset2 > len) /* XXX Should this be != instead of > ? */
+ goto done;
+
+ result = 0;
+ done:
+ evbuffer_drain(evbuf, len);
+ return result;
+}
diff --git a/libs/libevent/src/evmap-internal.h b/libs/libevent/src/evmap-internal.h
new file mode 100644
index 0000000000..dfc81d5087
--- /dev/null
+++ b/libs/libevent/src/evmap-internal.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVMAP_INTERNAL_H_INCLUDED_
+#define EVMAP_INTERNAL_H_INCLUDED_
+
+/** @file evmap-internal.h
+ *
+ * An event_map is a utility structure to map each fd or signal to zero or
+ * more events. Functions to manipulate event_maps should only be used from
+ * inside libevent. They generally need to hold the lock on the corresponding
+ * event_base.
+ **/
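+
+/* Rough call flow, as a sketch for orientation only: event_add() and
+ * event_del() update these maps through evmap_io_add_()/evmap_io_del_()
+ * (or the evmap_signal_* variants), which in turn notify the backend;
+ * when a backend's dispatch loop sees activity, it calls
+ * evmap_io_active_() or evmap_signal_active_() to activate the matching
+ * events.
+ */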
+
+struct event_base;
+struct event;
+
+/** Initialize an event_map for use.
+ */
+void evmap_io_initmap_(struct event_io_map* ctx);
+void evmap_signal_initmap_(struct event_signal_map* ctx);
+
+/** Remove all entries from an event_map.
+
+ @param ctx the map to clear.
+ */
+void evmap_io_clear_(struct event_io_map* ctx);
+void evmap_signal_clear_(struct event_signal_map* ctx);
+
+/** Add an IO event (some combination of EV_READ or EV_WRITE) to an
+ event_base's list of events on a given file descriptor, and tell the
+ underlying eventops about the fd if its state has changed.
+
+ Requires that ev is not already added.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor corresponding to ev.
+ @param ev the event to add.
+*/
+int evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev);
+/** Remove an IO event (some combination of EV_READ or EV_WRITE) from an
+ event_base's list of events on a given file descriptor, and tell the
+ underlying eventops about the fd if its state has changed.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor corresponding to ev.
+ @param ev the event to remove.
+ */
+int evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev);
+/** Activate the set of events waiting on an event_base for a given fd.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor that has become active.
+ @param events a bitmask of EV_READ|EV_WRITE|EV_ET.
+*/
+void evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events);
+
+
+/* These functions behave in the same way as evmap_io_*, except they work on
+ * signals rather than fds. signals use a linear map everywhere; fds use
+ * either a linear map or a hashtable. */
+int evmap_signal_add_(struct event_base *base, int signum, struct event *ev);
+int evmap_signal_del_(struct event_base *base, int signum, struct event *ev);
+void evmap_signal_active_(struct event_base *base, evutil_socket_t signum, int ncalls);
+
+/* Return the fdinfo object associated with a given fd. If the fd has no
+ * events associated with it, the result may be NULL.
+ */
+void *evmap_io_get_fdinfo_(struct event_io_map *ctx, evutil_socket_t fd);
+
+/* Helper for event_reinit(): Tell the backend to re-add every fd and signal
+ * for which we have a pending event.
+ */
+int evmap_reinit_(struct event_base *base);
+
+/* Helper for event_base_free(): Call event_del() on every pending fd and
+ * signal event.
+ */
+void evmap_delete_all_(struct event_base *base);
+
+/* Helper for event_base_assert_ok_(): Check referential integrity of the
+ * evmaps.
+ */
+void evmap_check_integrity_(struct event_base *base);
+
+/* Helper: Call fn on every fd or signal event, passing as its arguments the
+ * provided event_base, the event, and arg. If fn returns 0, process the next
+ * event. If it returns any other value, return that value and process no
+ * more events.
+ */
+int evmap_foreach_event_(struct event_base *base,
+ event_base_foreach_event_cb fn,
+ void *arg);
+
+#endif /* EVMAP_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evmap.c b/libs/libevent/src/evmap.c
new file mode 100644
index 0000000000..3f76dd0ae1
--- /dev/null
+++ b/libs/libevent/src/evmap.c
@@ -0,0 +1,1055 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+
+#include "event-internal.h"
+#include "evmap-internal.h"
+#include "mm-internal.h"
+#include "changelist-internal.h"
+
+/** An entry for an evmap_io list: notes all the events that want to read or
+ write on a given fd, and the number of each.
+ */
+struct evmap_io {
+ struct event_dlist events;
+ ev_uint16_t nread;
+ ev_uint16_t nwrite;
+ ev_uint16_t nclose;
+};
+
+/* An entry for an evmap_signal list: notes all the events that want to know
+ when a signal triggers. */
+struct evmap_signal {
+ struct event_dlist events;
+};
+
+/* On some platforms, fds start at 0 and increment by 1 as they are
+ allocated, and old numbers get reused. For these platforms, we
+ implement io maps just like signal maps: as an array of pointers to
+ struct evmap_io. But on other platforms (windows), sockets are not
+ 0-indexed, not necessarily consecutive, and not necessarily reused.
+ There, we use a hashtable to implement evmap_io.
+*/
+#ifdef EVMAP_USE_HT
+struct event_map_entry {
+ HT_ENTRY(event_map_entry) map_node;
+ evutil_socket_t fd;
+ union { /* This is a union in case we need to make more things that can
+ be in the hashtable. */
+ struct evmap_io evmap_io;
+ } ent;
+};
+
+/* Helper used by the event_io_map hashtable code; tries to return a good hash
+ * of the fd in e->fd. */
+static inline unsigned
+hashsocket(struct event_map_entry *e)
+{
+ /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
+ * matter. Our hashtable implementation really likes low-order bits,
+ * though, so let's do the rotate-and-add trick. */
+ unsigned h = (unsigned) e->fd;
+ h += (h >> 2) | (h << 30);
+ return h;
+}
+
+/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
+ * have the same e->fd. */
+static inline int
+eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
+{
+ return e1->fd == e2->fd;
+}
+
+HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
+HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
+ 0.5, mm_malloc, mm_realloc, mm_free)
+
+#define GET_IO_SLOT(x, map, slot, type) \
+ do { \
+ struct event_map_entry key_, *ent_; \
+ key_.fd = slot; \
+ ent_ = HT_FIND(event_io_map, map, &key_); \
+ (x) = ent_ ? &ent_->ent.type : NULL; \
+ } while (0);
+
+#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
+ do { \
+ struct event_map_entry key_, *ent_; \
+ key_.fd = slot; \
+ HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
+ event_map_entry, &key_, ptr, \
+ { \
+ ent_ = *ptr; \
+ }, \
+ { \
+ ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
+ if (EVUTIL_UNLIKELY(ent_ == NULL)) \
+ return (-1); \
+ ent_->fd = slot; \
+ (ctor)(&ent_->ent.type); \
+ HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
+ }); \
+ (x) = &ent_->ent.type; \
+ } while (0)
+
+void evmap_io_initmap_(struct event_io_map *ctx)
+{
+ HT_INIT(event_io_map, ctx);
+}
+
+void evmap_io_clear_(struct event_io_map *ctx)
+{
+ struct event_map_entry **ent, **next, *this;
+ for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
+ this = *ent;
+ next = HT_NEXT_RMV(event_io_map, ctx, ent);
+ mm_free(this);
+ }
+ HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
+}
+#endif
+
+/* Set the variable 'x' to the field in event_map 'map' with fields of type
+ 'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
+ if there are no entries for 'slot'. Does no bounds-checking. */
+#define GET_SIGNAL_SLOT(x, map, slot, type) \
+ (x) = (struct type *)((map)->entries[slot])
+/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
+ by allocating enough memory for a 'struct type', and initializing the new
+ value by calling the function 'ctor' on it. Makes the function
+ return -1 on allocation failure.
+ */
+#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
+ do { \
+ if ((map)->entries[slot] == NULL) { \
+ (map)->entries[slot] = \
+ mm_calloc(1,sizeof(struct type)+fdinfo_len); \
+ if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
+ return (-1); \
+ (ctor)((struct type *)(map)->entries[slot]); \
+ } \
+ (x) = (struct type *)((map)->entries[slot]); \
+ } while (0)
+
+/* If we aren't using hashtables, then define the IO_SLOT macros and functions
+ as thin aliases over the SIGNAL_SLOT versions. */
+#ifndef EVMAP_USE_HT
+#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
+#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
+ GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
+#define FDINFO_OFFSET sizeof(struct evmap_io)
+void
+evmap_io_initmap_(struct event_io_map* ctx)
+{
+ evmap_signal_initmap_(ctx);
+}
+void
+evmap_io_clear_(struct event_io_map* ctx)
+{
+ evmap_signal_clear_(ctx);
+}
+#endif
+
+
+/** Expand 'map' with new entries of width 'msize' until it is big enough
+ to store a value in 'slot'.
+ */
+static int
+evmap_make_space(struct event_signal_map *map, int slot, int msize)
+{
+ if (map->nentries <= slot) {
+ int nentries = map->nentries ? map->nentries : 32;
+ void **tmp;
+
+ while (nentries <= slot)
+ nentries <<= 1;
+
+ tmp = (void **)mm_realloc(map->entries, nentries * msize);
+ if (tmp == NULL)
+ return (-1);
+
+ memset(&tmp[map->nentries], 0,
+ (nentries - map->nentries) * msize);
+
+ map->nentries = nentries;
+ map->entries = tmp;
+ }
+
+ return (0);
+}
+
+void
+evmap_signal_initmap_(struct event_signal_map *ctx)
+{
+ ctx->nentries = 0;
+ ctx->entries = NULL;
+}
+
+void
+evmap_signal_clear_(struct event_signal_map *ctx)
+{
+ if (ctx->entries != NULL) {
+ int i;
+ for (i = 0; i < ctx->nentries; ++i) {
+ if (ctx->entries[i] != NULL)
+ mm_free(ctx->entries[i]);
+ }
+ mm_free(ctx->entries);
+ ctx->entries = NULL;
+ }
+ ctx->nentries = 0;
+}
+
+
+/* code specific to file descriptors */
+
+/** Constructor for struct evmap_io */
+static void
+evmap_io_init(struct evmap_io *entry)
+{
+ LIST_INIT(&entry->events);
+ entry->nread = 0;
+ entry->nwrite = 0;
+ entry->nclose = 0;
+}
+
+
+/* return -1 on error, 0 on success if nothing changed in the event backend,
+ * and 1 on success if something did. */
+int
+evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
+{
+ const struct eventop *evsel = base->evsel;
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx = NULL;
+ int nread, nwrite, nclose, retval = 0;
+ short res = 0, old = 0;
+ struct event *old_ev;
+
+ EVUTIL_ASSERT(fd == ev->ev_fd);
+
+ if (fd < 0)
+ return 0;
+
+#ifndef EVMAP_USE_HT
+ if (fd >= io->nentries) {
+ if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
+ return (-1);
+ }
+#endif
+ GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
+ evsel->fdinfo_len);
+
+ nread = ctx->nread;
+ nwrite = ctx->nwrite;
+ nclose = ctx->nclose;
+
+ if (nread)
+ old |= EV_READ;
+ if (nwrite)
+ old |= EV_WRITE;
+ if (nclose)
+ old |= EV_CLOSED;
+
+ if (ev->ev_events & EV_READ) {
+ if (++nread == 1)
+ res |= EV_READ;
+ }
+ if (ev->ev_events & EV_WRITE) {
+ if (++nwrite == 1)
+ res |= EV_WRITE;
+ }
+ if (ev->ev_events & EV_CLOSED) {
+ if (++nclose == 1)
+ res |= EV_CLOSED;
+ }
+ if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
+ event_warnx("Too many events reading or writing on fd %d",
+ (int)fd);
+ return -1;
+ }
+ if (EVENT_DEBUG_MODE_IS_ON() &&
+ (old_ev = LIST_FIRST(&ctx->events)) &&
+ (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
+ event_warnx("Tried to mix edge-triggered and non-edge-triggered"
+ " events on fd %d", (int)fd);
+ return -1;
+ }
+
+ if (res) {
+ void *extra = ((char*)ctx) + sizeof(struct evmap_io);
+ /* XXX(niels): we cannot mix edge-triggered and
+ * level-triggered, we should probably assert on
+ * this. */
+ if (evsel->add(base, ev->ev_fd,
+ old, (ev->ev_events & EV_ET) | res, extra) == -1)
+ return (-1);
+ retval = 1;
+ }
+
+ ctx->nread = (ev_uint16_t) nread;
+ ctx->nwrite = (ev_uint16_t) nwrite;
+ ctx->nclose = (ev_uint16_t) nclose;
+ LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);
+
+ return (retval);
+}
+
+/* return -1 on error, 0 on success if nothing changed in the event backend,
+ * and 1 on success if something did. */
+int
+evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
+{
+ const struct eventop *evsel = base->evsel;
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx;
+ int nread, nwrite, nclose, retval = 0;
+ short res = 0, old = 0;
+
+ if (fd < 0)
+ return 0;
+
+ EVUTIL_ASSERT(fd == ev->ev_fd);
+
+#ifndef EVMAP_USE_HT
+ if (fd >= io->nentries)
+ return (-1);
+#endif
+
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
+
+ nread = ctx->nread;
+ nwrite = ctx->nwrite;
+ nclose = ctx->nclose;
+
+ if (nread)
+ old |= EV_READ;
+ if (nwrite)
+ old |= EV_WRITE;
+ if (nclose)
+ old |= EV_CLOSED;
+
+ if (ev->ev_events & EV_READ) {
+ if (--nread == 0)
+ res |= EV_READ;
+ EVUTIL_ASSERT(nread >= 0);
+ }
+ if (ev->ev_events & EV_WRITE) {
+ if (--nwrite == 0)
+ res |= EV_WRITE;
+ EVUTIL_ASSERT(nwrite >= 0);
+ }
+ if (ev->ev_events & EV_CLOSED) {
+ if (--nclose == 0)
+ res |= EV_CLOSED;
+ EVUTIL_ASSERT(nclose >= 0);
+ }
+
+ if (res) {
+ void *extra = ((char*)ctx) + sizeof(struct evmap_io);
+ if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) {
+ retval = -1;
+ } else {
+ retval = 1;
+ }
+ }
+
+ ctx->nread = nread;
+ ctx->nwrite = nwrite;
+ ctx->nclose = nclose;
+ LIST_REMOVE(ev, ev_io_next);
+
+ return (retval);
+}
+
+void
+evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
+{
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx;
+ struct event *ev;
+
+#ifndef EVMAP_USE_HT
+ if (fd < 0 || fd >= io->nentries)
+ return;
+#endif
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
+
+ if (NULL == ctx)
+ return;
+ LIST_FOREACH(ev, &ctx->events, ev_io_next) {
+ if (ev->ev_events & events)
+ event_active_nolock_(ev, ev->ev_events & events, 1);
+ }
+}
+
+/* code specific to signals */
+
+static void
+evmap_signal_init(struct evmap_signal *entry)
+{
+ LIST_INIT(&entry->events);
+}
+
+
+int
+evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
+{
+ const struct eventop *evsel = base->evsigsel;
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx = NULL;
+
+ if (sig >= map->nentries) {
+ if (evmap_make_space(
+ map, sig, sizeof(struct evmap_signal *)) == -1)
+ return (-1);
+ }
+ GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
+ base->evsigsel->fdinfo_len);
+
+ if (LIST_EMPTY(&ctx->events)) {
+ if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
+ == -1)
+ return (-1);
+ }
+
+ LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
+
+ return (1);
+}
+
+int
+evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
+{
+ const struct eventop *evsel = base->evsigsel;
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx;
+
+ if (sig >= map->nentries)
+ return (-1);
+
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
+
+ LIST_REMOVE(ev, ev_signal_next);
+
+ if (LIST_FIRST(&ctx->events) == NULL) {
+ if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
+ return (-1);
+ }
+
+ return (1);
+}
+
+void
+evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
+{
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx;
+ struct event *ev;
+
+ if (sig < 0 || sig >= map->nentries)
+ return;
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
+
+ if (!ctx)
+ return;
+ LIST_FOREACH(ev, &ctx->events, ev_signal_next)
+ event_active_nolock_(ev, EV_SIGNAL, ncalls);
+}
+
+void *
+evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
+{
+ struct evmap_io *ctx;
+ GET_IO_SLOT(ctx, map, fd, evmap_io);
+ if (ctx)
+ return ((char*)ctx) + sizeof(struct evmap_io);
+ else
+ return NULL;
+}
+
+/* Callback type for evmap_io_foreach_fd */
+typedef int (*evmap_io_foreach_fd_cb)(
+ struct event_base *, evutil_socket_t, struct evmap_io *, void *);
+
+/* Multipurpose helper function: Iterate over every file descriptor in the
+ * event_base for which we could have EV_READ or EV_WRITE events. For each
+ * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
+ * function, base is the event_base, fd is the file descriptor, evmap_io
+ * is an evmap_io structure containing a list of events pending on the
+ * file descriptor, and arg is the user-supplied argument.
+ *
+ * If fn returns 0, continue on to the next fd. Otherwise, return the same
+ * value that fn returned.
+ *
+ * Note that there is no guarantee that the file descriptors will be processed
+ * in any particular order.
+ */
+static int
+evmap_io_foreach_fd(struct event_base *base,
+ evmap_io_foreach_fd_cb fn,
+ void *arg)
+{
+ evutil_socket_t fd;
+ struct event_io_map *iomap = &base->io;
+ int r = 0;
+#ifdef EVMAP_USE_HT
+ struct event_map_entry **mapent;
+ HT_FOREACH(mapent, event_io_map, iomap) {
+ struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
+ fd = (*mapent)->fd;
+#else
+ for (fd = 0; fd < iomap->nentries; ++fd) {
+ struct evmap_io *ctx = iomap->entries[fd];
+ if (!ctx)
+ continue;
+#endif
+ if ((r = fn(base, fd, ctx, arg)))
+ break;
+ }
+ return r;
+}
+
+/* Callback type for evmap_signal_foreach_signal */
+typedef int (*evmap_signal_foreach_signal_cb)(
+ struct event_base *, int, struct evmap_signal *, void *);
+
+/* Multipurpose helper function: Iterate over every signal number in the
+ * event_base for which we could have signal events. For each such signal,
+ * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
+ * function, base is the event_base, signum is the signal number, evmap_signal
+ * is an evmap_signal structure containing a list of events pending on the
+ * signal, and arg is the user-supplied argument.
+ *
+ * If fn returns 0, continue on to the next signal. Otherwise, return the same
+ * value that fn returned.
+ */
+static int
+evmap_signal_foreach_signal(struct event_base *base,
+ evmap_signal_foreach_signal_cb fn,
+ void *arg)
+{
+ struct event_signal_map *sigmap = &base->sigmap;
+ int r = 0;
+ int signum;
+
+ for (signum = 0; signum < sigmap->nentries; ++signum) {
+ struct evmap_signal *ctx = sigmap->entries[signum];
+ if (!ctx)
+ continue;
+ if ((r = fn(base, signum, ctx, arg)))
+ break;
+ }
+ return r;
+}
+
+/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
+ * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
+ * EV_ET. */
+static int
+evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *ctx, void *arg)
+{
+ const struct eventop *evsel = base->evsel;
+ void *extra;
+ int *result = arg;
+ short events = 0;
+ struct event *ev;
+ EVUTIL_ASSERT(ctx);
+
+ extra = ((char*)ctx) + sizeof(struct evmap_io);
+ if (ctx->nread)
+ events |= EV_READ;
+ if (ctx->nwrite)
+ events |= EV_WRITE;
+ if (ctx->nclose)
+ events |= EV_CLOSED;
+ if (evsel->fdinfo_len)
+ memset(extra, 0, evsel->fdinfo_len);
+ if (events &&
+ (ev = LIST_FIRST(&ctx->events)) &&
+ (ev->ev_events & EV_ET))
+ events |= EV_ET;
+ if (evsel->add(base, fd, 0, events, extra) == -1)
+ *result = -1;
+
+ return 0;
+}
+
+/* Helper for evmap_reinit_: tell the backend to add every signal for which we
+ * have pending events. */
+static int
+evmap_signal_reinit_iter_fn(struct event_base *base,
+ int signum, struct evmap_signal *ctx, void *arg)
+{
+ const struct eventop *evsel = base->evsigsel;
+ int *result = arg;
+
+ if (!LIST_EMPTY(&ctx->events)) {
+ if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
+ *result = -1;
+ }
+ return 0;
+}
+
+int
+evmap_reinit_(struct event_base *base)
+{
+ int result = 0;
+
+ evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
+ if (result < 0)
+ return -1;
+ evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
+ if (result < 0)
+ return -1;
+ return 0;
+}
+
+/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
+static int
+delete_all_in_dlist(struct event_dlist *dlist)
+{
+ struct event *ev;
+ while ((ev = LIST_FIRST(dlist)))
+ event_del(ev);
+ return 0;
+}
+
+/* Helper for evmap_delete_all_: delete every event pending on an fd. */
+static int
+evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ return delete_all_in_dlist(&io_info->events);
+}
+
+/* Helper for evmap_delete_all_: delete every event pending on a signal. */
+static int
+evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
+ struct evmap_signal *sig_info, void *arg)
+{
+ return delete_all_in_dlist(&sig_info->events);
+}
+
+void
+evmap_delete_all_(struct event_base *base)
+{
+ evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
+ evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
+}
+
+/** Per-fd structure for use with changelists. It keeps track, for each fd or
+ * signal using the changelist, of where its entry in the changelist is.
+ */
+struct event_changelist_fdinfo {
+ int idxplus1; /* this is the index +1, so that memset(0) will make it
+ * a no-such-element */
+};
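+/* For example (illustrative): idxplus1 == 3 means this fd's pending change
+ * is at changelist->changes[2]; idxplus1 == 0 means there is no pending
+ * change for it. */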
+
+void
+event_changelist_init_(struct event_changelist *changelist)
+{
+ changelist->changes = NULL;
+ changelist->changes_size = 0;
+ changelist->n_changes = 0;
+}
+
+/** Helper: return the changelist_fdinfo corresponding to a given change. */
+static inline struct event_changelist_fdinfo *
+event_change_get_fdinfo(struct event_base *base,
+ const struct event_change *change)
+{
+ char *ptr;
+ if (change->read_change & EV_CHANGE_SIGNAL) {
+ struct evmap_signal *ctx;
+ GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
+ ptr = ((char*)ctx) + sizeof(struct evmap_signal);
+ } else {
+ struct evmap_io *ctx;
+ GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
+ ptr = ((char*)ctx) + sizeof(struct evmap_io);
+ }
+ return (void*)ptr;
+}
+
+/** Callback helper for event_changelist_assert_ok */
+static int
+event_changelist_assert_ok_foreach_iter_fn(
+ struct event_base *base,
+ evutil_socket_t fd, struct evmap_io *io, void *arg)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *f;
+ f = (void*)
+ ( ((char*)io) + sizeof(struct evmap_io) );
+ if (f->idxplus1) {
+ struct event_change *c = &changelist->changes[f->idxplus1 - 1];
+ EVUTIL_ASSERT(c->fd == fd);
+ }
+ return 0;
+}
+
+/** Make sure that the changelist is consistent with the evmap structures. */
+static void
+event_changelist_assert_ok(struct event_base *base)
+{
+ int i;
+ struct event_changelist *changelist = &base->changelist;
+
+ EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
+ for (i = 0; i < changelist->n_changes; ++i) {
+ struct event_change *c = &changelist->changes[i];
+ struct event_changelist_fdinfo *f;
+ EVUTIL_ASSERT(c->fd >= 0);
+ f = event_change_get_fdinfo(base, c);
+ EVUTIL_ASSERT(f);
+ EVUTIL_ASSERT(f->idxplus1 == i + 1);
+ }
+
+ evmap_io_foreach_fd(base,
+ event_changelist_assert_ok_foreach_iter_fn,
+ NULL);
+}
+
+#ifdef DEBUG_CHANGELIST
+#define event_changelist_check(base) event_changelist_assert_ok((base))
+#else
+#define event_changelist_check(base) ((void)0)
+#endif
+
+void
+event_changelist_remove_all_(struct event_changelist *changelist,
+ struct event_base *base)
+{
+ int i;
+
+ event_changelist_check(base);
+
+ for (i = 0; i < changelist->n_changes; ++i) {
+ struct event_change *ch = &changelist->changes[i];
+ struct event_changelist_fdinfo *fdinfo =
+ event_change_get_fdinfo(base, ch);
+ EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
+ fdinfo->idxplus1 = 0;
+ }
+
+ changelist->n_changes = 0;
+
+ event_changelist_check(base);
+}
+
+void
+event_changelist_freemem_(struct event_changelist *changelist)
+{
+ if (changelist->changes)
+ mm_free(changelist->changes);
+ event_changelist_init_(changelist); /* zero it all out. */
+}
+
+/** Increase the size of 'changelist' to hold more changes. */
+static int
+event_changelist_grow(struct event_changelist *changelist)
+{
+ int new_size;
+ struct event_change *new_changes;
+ if (changelist->changes_size < 64)
+ new_size = 64;
+ else
+ new_size = changelist->changes_size * 2;
+
+ new_changes = mm_realloc(changelist->changes,
+ new_size * sizeof(struct event_change));
+
+ if (EVUTIL_UNLIKELY(new_changes == NULL))
+ return (-1);
+
+ changelist->changes = new_changes;
+ changelist->changes_size = new_size;
+
+ return (0);
+}
+
+/** Return a pointer to the changelist entry for the file descriptor or signal
+ * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
+ * old_events field to old_events.
+ */
+static struct event_change *
+event_changelist_get_or_construct(struct event_changelist *changelist,
+ evutil_socket_t fd,
+ short old_events,
+ struct event_changelist_fdinfo *fdinfo)
+{
+ struct event_change *change;
+
+ if (fdinfo->idxplus1 == 0) {
+ int idx;
+ EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
+
+ if (changelist->n_changes == changelist->changes_size) {
+ if (event_changelist_grow(changelist) < 0)
+ return NULL;
+ }
+
+ idx = changelist->n_changes++;
+ change = &changelist->changes[idx];
+ fdinfo->idxplus1 = idx + 1;
+
+ memset(change, 0, sizeof(struct event_change));
+ change->fd = fd;
+ change->old_events = old_events;
+ } else {
+ change = &changelist->changes[fdinfo->idxplus1 - 1];
+ EVUTIL_ASSERT(change->fd == fd);
+ }
+ return change;
+}
+
+int
+event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *fdinfo = p;
+ struct event_change *change;
+
+ event_changelist_check(base);
+
+ change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
+ if (!change)
+ return -1;
+
+ /* An add replaces any previous delete, but doesn't result in a no-op,
+ * since the delete might fail (because the fd had been closed since
+ * the last add, for instance). */
+
+ if (events & (EV_READ|EV_SIGNAL)) {
+ change->read_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+ if (events & EV_WRITE) {
+ change->write_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+ if (events & EV_CLOSED) {
+ change->close_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+
+ event_changelist_check(base);
+ return (0);
+}
+
+int
+event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *fdinfo = p;
+ struct event_change *change;
+
+ event_changelist_check(base);
+ change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
+ event_changelist_check(base);
+ if (!change)
+ return -1;
+
+ /* A delete on an event set that doesn't contain the event to be
+ deleted produces a no-op. This effectively removes any previous
+ uncommitted add, rather than replacing it: on those platforms where
+ "add, delete, dispatch" is not the same as "no-op, dispatch", we
+ want the no-op behavior.
+
+ If we have a no-op item, we could remove it from the list
+ entirely, but really there's not much point: skipping the no-op
+ change when we do the dispatch later is far cheaper than rejuggling
+ the array now.
+
+ As this stands, it also lets through deletions of events that are
+ not currently set.
+ */
+
+ if (events & (EV_READ|EV_SIGNAL)) {
+ if (!(change->old_events & (EV_READ | EV_SIGNAL)))
+ change->read_change = 0;
+ else
+ change->read_change = EV_CHANGE_DEL;
+ }
+ if (events & EV_WRITE) {
+ if (!(change->old_events & EV_WRITE))
+ change->write_change = 0;
+ else
+ change->write_change = EV_CHANGE_DEL;
+ }
+ if (events & EV_CLOSED) {
+ if (!(change->old_events & EV_CLOSED))
+ change->close_change = 0;
+ else
+ change->close_change = EV_CHANGE_DEL;
+ }
+
+ event_changelist_check(base);
+ return (0);
+}
+
+/* Helper for evmap_check_integrity_: verify that all of the events pending on
+ * a given fd are set up correctly, and that the nread, nwrite, and nclose
+ * counts on that fd are correct. */
+static int
+evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ struct event *ev;
+ int n_read = 0, n_write = 0, n_close = 0;
+
+ /* First, make sure the list itself isn't corrupt. Otherwise,
+ * running LIST_FOREACH could be an exciting adventure. */
+ EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);
+
+ LIST_FOREACH(ev, &io_info->events, ev_io_next) {
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
+ EVUTIL_ASSERT(ev->ev_fd == fd);
+ EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
+ EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
+ if (ev->ev_events & EV_READ)
+ ++n_read;
+ if (ev->ev_events & EV_WRITE)
+ ++n_write;
+ if (ev->ev_events & EV_CLOSED)
+ ++n_close;
+ }
+
+ EVUTIL_ASSERT(n_read == io_info->nread);
+ EVUTIL_ASSERT(n_write == io_info->nwrite);
+ EVUTIL_ASSERT(n_close == io_info->nclose);
+
+ return 0;
+}
+
+/* Helper for evmap_check_integrity_: verify that all of the events pending
+ * on a given signal are set up correctly. */
+static int
+evmap_signal_check_integrity_fn(struct event_base *base,
+ int signum, struct evmap_signal *sig_info, void *arg)
+{
+ struct event *ev;
+ /* First, make sure the list itself isn't corrupt. */
+ EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
+
+ LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
+ EVUTIL_ASSERT(ev->ev_fd == signum);
+ EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
+ EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
+ }
+ return 0;
+}
+
+void
+evmap_check_integrity_(struct event_base *base)
+{
+ evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
+ evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);
+
+ if (base->evsel->add == event_changelist_add_)
+ event_changelist_assert_ok(base);
+}
+
+/* Helper type for evmap_foreach_event_: Bundles a function to call on every
+ * event, and the user-provided void* to use as its third argument. */
+struct evmap_foreach_event_helper {
+ event_base_foreach_event_cb fn;
+ void *arg;
+};
+
+/* Helper for evmap_foreach_event_: calls a provided function on every event
+ * pending on a given fd. */
+static int
+evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ struct evmap_foreach_event_helper *h = arg;
+ struct event *ev;
+ int r;
+ LIST_FOREACH(ev, &io_info->events, ev_io_next) {
+ if ((r = h->fn(base, ev, h->arg)))
+ return r;
+ }
+ return 0;
+}
+
+/* Helper for evmap_foreach_event_: calls a provided function on every event
+ * pending on a given signal. */
+static int
+evmap_signal_foreach_event_fn(struct event_base *base, int signum,
+ struct evmap_signal *sig_info, void *arg)
+{
+ struct event *ev;
+ struct evmap_foreach_event_helper *h = arg;
+ int r;
+ LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
+ if ((r = h->fn(base, ev, h->arg)))
+ return r;
+ }
+ return 0;
+}
+
+int
+evmap_foreach_event_(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ struct evmap_foreach_event_helper h;
+ int r;
+ h.fn = fn;
+ h.arg = arg;
+ if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
+ return r;
+ return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
+}
+
diff --git a/libs/libevent/src/evrpc-internal.h b/libs/libevent/src/evrpc-internal.h
new file mode 100644
index 0000000000..9eb376386d
--- /dev/null
+++ b/libs/libevent/src/evrpc-internal.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVRPC_INTERNAL_H_INCLUDED_
+#define EVRPC_INTERNAL_H_INCLUDED_
+
+#include "event2/http.h"
+#include "http-internal.h"
+
+struct evrpc;
+struct evrpc_request_wrapper;
+
+#define EVRPC_URI_PREFIX "/.rpc."
+
+struct evrpc_hook {
+ TAILQ_ENTRY(evrpc_hook) next;
+
+ /* returns EVRPC_TERMINATE if the rpc should be aborted;
+ * a hook is allowed to rewrite the evbuffer
+ */
+ int (*process)(void *, struct evhttp_request *,
+ struct evbuffer *, void *);
+ void *process_arg;
+};
+
+TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
+
+/*
+ * this is shared between the base and the pool, so that we can reuse
+ * the hook adding functions; we alias both evrpc_pool and evrpc_base
+ * to this common structure.
+ */
+
+struct evrpc_hook_ctx;
+TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx);
+
+struct evrpc_hooks_ {
+ /* hooks for processing outbound and inbound rpcs */
+ struct evrpc_hook_list in_hooks;
+ struct evrpc_hook_list out_hooks;
+
+ struct evrpc_pause_list pause_requests;
+};
+
+#define input_hooks common.in_hooks
+#define output_hooks common.out_hooks
+#define paused_requests common.pause_requests
+
+struct evrpc_base {
+ struct evrpc_hooks_ common;
+
+ /* the HTTP server under which we register our RPC calls */
+ struct evhttp* http_server;
+
+ /* a list of all RPCs registered with us */
+ TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
+};
+
+struct evrpc_req_generic;
+void evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state);
+
+/* A pool for holding evhttp_connection objects */
+struct evrpc_pool {
+ struct evrpc_hooks_ common;
+
+ struct event_base *base;
+
+ struct evconq connections;
+
+ int timeout;
+
+ TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) (requests);
+};
+
+struct evrpc_hook_ctx {
+ TAILQ_ENTRY(evrpc_hook_ctx) next;
+
+ void *ctx;
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT);
+};
+
+struct evrpc_meta {
+ TAILQ_ENTRY(evrpc_meta) next;
+ char *key;
+
+ void *data;
+ size_t data_size;
+};
+
+TAILQ_HEAD(evrpc_meta_list, evrpc_meta);
+
+struct evrpc_hook_meta {
+ struct evrpc_meta_list meta_data;
+ struct evhttp_connection *evcon;
+};
+
+/* allows association of meta data with a request */
+static void evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx,
+ struct evhttp_connection *evcon);
+
+/* creates a new meta data store */
+static struct evrpc_hook_meta *evrpc_hook_meta_new_(void);
+
+/* frees the meta data associated with a request */
+static void evrpc_hook_context_free_(struct evrpc_hook_meta *ctx);
+
+/* the server side of an rpc */
+
+/* We alias the RPC specific structs to this voided one */
+struct evrpc_req_generic {
+ /*
+ * allows association of meta data via hooks - needs to be
+ * synchronized with evrpc_request_wrapper
+ */
+ struct evrpc_hook_meta *hook_meta;
+
+ /* the unmarshaled request object */
+ void *request;
+
+ /* the empty reply object that needs to be filled in */
+ void *reply;
+
+ /*
+ * the static structure for this rpc; that can be used to
+ * automatically unmarshal and marshal the http buffers.
+ */
+ struct evrpc *rpc;
+
+ /*
+ * the http request structure on which we need to answer.
+ */
+ struct evhttp_request* http_req;
+
+ /*
+ * Temporary data store for marshaled data
+ */
+ struct evbuffer* rpc_data;
+};
+
+/* the client side of an rpc request */
+struct evrpc_request_wrapper {
+ /*
+ * allows association of meta data via hooks - needs to be
+ * synchronized with evrpc_req_generic.
+ */
+ struct evrpc_hook_meta *hook_meta;
+
+ TAILQ_ENTRY(evrpc_request_wrapper) next;
+
+ /* pool on which this rpc request is being made */
+ struct evrpc_pool *pool;
+
+ /* connection on which the request is being sent */
+ struct evhttp_connection *evcon;
+
+ /* the actual request */
+ struct evhttp_request *req;
+
+ /* event for implementing request timeouts */
+ struct event ev_timeout;
+
+ /* the name of the rpc */
+ char *name;
+
+ /* callback */
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
+ void *cb_arg;
+
+ void *request;
+ void *reply;
+
+ /* marshals the request into a buffer */
+ void (*request_marshal)(struct evbuffer *, void *);
+
+ /* removes all stored state in the reply */
+ void (*reply_clear)(void *);
+
+ /* unmarshals the buffer into the reply structure */
+ int (*reply_unmarshal)(void *, struct evbuffer*);
+};
+
+#endif /* EVRPC_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evrpc.c b/libs/libevent/src/evrpc.c
new file mode 100644
index 0000000000..2443ab2793
--- /dev/null
+++ b/libs/libevent/src/evrpc.c
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+
+#include <sys/queue.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/rpc.h"
+#include "event2/rpc_struct.h"
+#include "evrpc-internal.h"
+#include "event2/http.h"
+#include "event2/buffer.h"
+#include "event2/tag.h"
+#include "event2/http_struct.h"
+#include "event2/http_compat.h"
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+
+struct evrpc_base *
+evrpc_init(struct evhttp *http_server)
+{
+ struct evrpc_base* base = mm_calloc(1, sizeof(struct evrpc_base));
+ if (base == NULL)
+ return (NULL);
+
+ /* we rely on the tagging sub system */
+ evtag_init();
+
+ TAILQ_INIT(&base->registered_rpcs);
+ TAILQ_INIT(&base->input_hooks);
+ TAILQ_INIT(&base->output_hooks);
+
+ TAILQ_INIT(&base->paused_requests);
+
+ base->http_server = http_server;
+
+ return (base);
+}
+
+void
+evrpc_free(struct evrpc_base *base)
+{
+ struct evrpc *rpc;
+ struct evrpc_hook *hook;
+ struct evrpc_hook_ctx *pause;
+ int r;
+
+ while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
+ r = evrpc_unregister_rpc(base, rpc->uri);
+ EVUTIL_ASSERT(r == 0);
+ }
+ while ((pause = TAILQ_FIRST(&base->paused_requests)) != NULL) {
+ TAILQ_REMOVE(&base->paused_requests, pause, next);
+ mm_free(pause);
+ }
+ while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
+ r = evrpc_remove_hook(base, EVRPC_INPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+ while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
+ r = evrpc_remove_hook(base, EVRPC_OUTPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+ mm_free(base);
+}
+
+void *
+evrpc_add_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
+ void *cb_arg)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ struct evrpc_hook *hook = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ hook = mm_calloc(1, sizeof(struct evrpc_hook));
+ EVUTIL_ASSERT(hook != NULL);
+
+ hook->process = cb;
+ hook->process_arg = cb_arg;
+ TAILQ_INSERT_TAIL(head, hook, next);
+
+ return (hook);
+}
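+/*
+ * Illustrative sketch, not part of the original source: a minimal hook
+ * callback and how it might be attached to a base.  The names trace_hook
+ * and my_base are hypothetical.
+ *
+ *	static int
+ *	trace_hook(void *ctx, struct evhttp_request *req,
+ *	    struct evbuffer *evbuf, void *arg)
+ *	{
+ *		// inspect or rewrite the payload here; EVRPC_TERMINATE
+ *		// aborts the call, EVRPC_PAUSE defers it until
+ *		// evrpc_resume_request() is called.
+ *		return (EVRPC_CONTINUE);
+ *	}
+ *
+ *	void *handle = evrpc_add_hook(my_base, EVRPC_INPUT, trace_hook, NULL);
+ *	...
+ *	evrpc_remove_hook(my_base, EVRPC_INPUT, handle);
+ */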
+
+static int
+evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
+{
+ struct evrpc_hook *hook = NULL;
+ TAILQ_FOREACH(hook, head, next) {
+ if (hook == handle) {
+ TAILQ_REMOVE(head, hook, next);
+ mm_free(hook);
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * remove the hook specified by the handle
+ */
+
+int
+evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ return (evrpc_remove_hook_internal(head, handle));
+}
+
+static int
+evrpc_process_hooks(struct evrpc_hook_list *head, void *ctx,
+ struct evhttp_request *req, struct evbuffer *evbuf)
+{
+ struct evrpc_hook *hook;
+ TAILQ_FOREACH(hook, head, next) {
+ int res = hook->process(ctx, req, evbuf, hook->process_arg);
+ if (res != EVRPC_CONTINUE)
+ return (res);
+ }
+
+ return (EVRPC_CONTINUE);
+}
+
+static void evrpc_pool_schedule(struct evrpc_pool *pool);
+static void evrpc_request_cb(struct evhttp_request *, void *);
+
+/*
+ * Registers a new RPC with the HTTP server. The evrpc object is expected
+ * to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn
+ * calls this function.
+ */
+
+static char *
+evrpc_construct_uri(const char *uri)
+{
+ char *constructed_uri;
+ size_t constructed_uri_len;
+
+ constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
+ if ((constructed_uri = mm_malloc(constructed_uri_len)) == NULL)
+ event_err(1, "%s: failed to register rpc at %s",
+ __func__, uri);
+ memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
+ memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
+ constructed_uri[constructed_uri_len - 1] = '\0';
+
+ return (constructed_uri);
+}
+
+int
+evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
+ void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
+{
+ char *constructed_uri = evrpc_construct_uri(rpc->uri);
+
+ rpc->base = base;
+ rpc->cb = cb;
+ rpc->cb_arg = cb_arg;
+
+ TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
+
+ evhttp_set_cb(base->http_server,
+ constructed_uri,
+ evrpc_request_cb,
+ rpc);
+
+ mm_free(constructed_uri);
+
+ return (0);
+}
+
+int
+evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
+{
+ char *registered_uri = NULL;
+ struct evrpc *rpc;
+ int r;
+
+ /* find the right rpc; linear search might be slow */
+ TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
+ if (strcmp(rpc->uri, name) == 0)
+ break;
+ }
+ if (rpc == NULL) {
+ /* We did not find an RPC with this name */
+ return (-1);
+ }
+ TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
+
+ registered_uri = evrpc_construct_uri(name);
+
+ /* remove the http server callback */
+ r = evhttp_del_cb(base->http_server, registered_uri);
+ EVUTIL_ASSERT(r == 0);
+
+ mm_free(registered_uri);
+
+ mm_free((char *)rpc->uri);
+ mm_free(rpc);
+ return (0);
+}
+
+static int evrpc_pause_request(void *vbase, void *ctx,
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT));
+static void evrpc_request_cb_closure(void *, enum EVRPC_HOOK_RESULT);
+
+static void
+evrpc_request_cb(struct evhttp_request *req, void *arg)
+{
+ struct evrpc *rpc = arg;
+ struct evrpc_req_generic *rpc_state = NULL;
+
+ /* let's verify the outside parameters */
+ if (req->type != EVHTTP_REQ_POST ||
+ evbuffer_get_length(req->input_buffer) <= 0)
+ goto error;
+
+ rpc_state = mm_calloc(1, sizeof(struct evrpc_req_generic));
+ if (rpc_state == NULL)
+ goto error;
+ rpc_state->rpc = rpc;
+ rpc_state->http_req = req;
+ rpc_state->rpc_data = NULL;
+
+ if (TAILQ_FIRST(&rpc->base->input_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon);
+
+ /*
+		 * allow hooks to modify the incoming request
+ */
+ hook_res = evrpc_process_hooks(&rpc->base->input_hooks,
+ rpc_state, req, req->input_buffer);
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ evrpc_pause_request(rpc->base, rpc_state,
+ evrpc_request_cb_closure);
+ return;
+ case EVRPC_CONTINUE:
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_request_cb_closure(rpc_state, EVRPC_CONTINUE);
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+static void
+evrpc_request_cb_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_req_generic *rpc_state = arg;
+ struct evrpc *rpc;
+ struct evhttp_request *req;
+
+ EVUTIL_ASSERT(rpc_state);
+ rpc = rpc_state->rpc;
+ req = rpc_state->http_req;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ /* let's check that we can parse the request */
+ rpc_state->request = rpc->request_new(rpc->request_new_arg);
+ if (rpc_state->request == NULL)
+ goto error;
+
+ if (rpc->request_unmarshal(
+ rpc_state->request, req->input_buffer) == -1) {
+ /* we failed to parse the request; that's a bummer */
+ goto error;
+ }
+
+ /* at this point, we have a well formed request, prepare the reply */
+
+ rpc_state->reply = rpc->reply_new(rpc->reply_new_arg);
+ if (rpc_state->reply == NULL)
+ goto error;
+
+ /* give the rpc to the user; they can deal with it */
+ rpc->cb(rpc_state, rpc->cb_arg);
+
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+
+void
+evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state)
+{
+ struct evrpc *rpc;
+ EVUTIL_ASSERT(rpc_state != NULL);
+ rpc = rpc_state->rpc;
+
+ /* clean up all memory */
+ if (rpc_state->hook_meta != NULL)
+ evrpc_hook_context_free_(rpc_state->hook_meta);
+ if (rpc_state->request != NULL)
+ rpc->request_free(rpc_state->request);
+ if (rpc_state->reply != NULL)
+ rpc->reply_free(rpc_state->reply);
+ if (rpc_state->rpc_data != NULL)
+ evbuffer_free(rpc_state->rpc_data);
+ mm_free(rpc_state);
+}
+
+static void
+evrpc_request_done_closure(void *, enum EVRPC_HOOK_RESULT);
+
+void
+evrpc_request_done(struct evrpc_req_generic *rpc_state)
+{
+ struct evhttp_request *req;
+ struct evrpc *rpc;
+
+ EVUTIL_ASSERT(rpc_state);
+
+ req = rpc_state->http_req;
+ rpc = rpc_state->rpc;
+
+ if (rpc->reply_complete(rpc_state->reply) == -1) {
+ /* the reply was not completely filled in. error out */
+ goto error;
+ }
+
+ if ((rpc_state->rpc_data = evbuffer_new()) == NULL) {
+ /* out of memory */
+ goto error;
+ }
+
+ /* serialize the reply */
+ rpc->reply_marshal(rpc_state->rpc_data, rpc_state->reply);
+
+ if (TAILQ_FIRST(&rpc->base->output_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon);
+
+ /* do hook based tweaks to the request */
+ hook_res = evrpc_process_hooks(&rpc->base->output_hooks,
+ rpc_state, req, rpc_state->rpc_data);
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ if (evrpc_pause_request(rpc->base, rpc_state,
+ evrpc_request_done_closure) == -1)
+ goto error;
+ return;
+ case EVRPC_CONTINUE:
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_request_done_closure(rpc_state, EVRPC_CONTINUE);
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+void *
+evrpc_get_request(struct evrpc_req_generic *req)
+{
+ return req->request;
+}
+
+void *
+evrpc_get_reply(struct evrpc_req_generic *req)
+{
+ return req->reply;
+}
+
+static void
+evrpc_request_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_req_generic *rpc_state = arg;
+ struct evhttp_request *req;
+ EVUTIL_ASSERT(rpc_state);
+ req = rpc_state->http_req;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ /* on success, we are going to transmit marshaled binary data */
+ if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
+ evhttp_add_header(req->output_headers,
+ "Content-Type", "application/octet-stream");
+ }
+ evhttp_send_reply(req, HTTP_OK, "OK", rpc_state->rpc_data);
+
+ evrpc_reqstate_free_(rpc_state);
+
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+
+/* Client side of the RPC implementation */
+
+static int evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx);
+
+struct evrpc_pool *
+evrpc_pool_new(struct event_base *base)
+{
+ struct evrpc_pool *pool = mm_calloc(1, sizeof(struct evrpc_pool));
+ if (pool == NULL)
+ return (NULL);
+
+ TAILQ_INIT(&pool->connections);
+ TAILQ_INIT(&pool->requests);
+
+ TAILQ_INIT(&pool->paused_requests);
+
+ TAILQ_INIT(&pool->input_hooks);
+ TAILQ_INIT(&pool->output_hooks);
+
+ pool->base = base;
+ pool->timeout = -1;
+
+ return (pool);
+}
+
+static void
+evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
+{
+ if (request->hook_meta != NULL)
+ evrpc_hook_context_free_(request->hook_meta);
+ mm_free(request->name);
+ mm_free(request);
+}
+
+void
+evrpc_pool_free(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ struct evrpc_request_wrapper *request;
+ struct evrpc_hook_ctx *pause;
+ struct evrpc_hook *hook;
+ int r;
+
+ while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, request, next);
+ evrpc_request_wrapper_free(request);
+ }
+
+ while ((pause = TAILQ_FIRST(&pool->paused_requests)) != NULL) {
+ TAILQ_REMOVE(&pool->paused_requests, pause, next);
+ mm_free(pause);
+ }
+
+ while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
+ TAILQ_REMOVE(&pool->connections, connection, next);
+ evhttp_connection_free(connection);
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
+ r = evrpc_remove_hook(pool, EVRPC_INPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
+ r = evrpc_remove_hook(pool, EVRPC_OUTPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+
+ mm_free(pool);
+}
+
+/*
+ * Add a connection to the RPC pool. A request scheduled on the pool
+ * may use any available connection.
+ */
+
+void
+evrpc_pool_add_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *connection)
+{
+ EVUTIL_ASSERT(connection->http_server == NULL);
+ TAILQ_INSERT_TAIL(&pool->connections, connection, next);
+
+ /*
+ * associate an event base with this connection
+ */
+ if (pool->base != NULL)
+ evhttp_connection_set_base(connection, pool->base);
+
+ /*
+ * unless a timeout was specifically set for a connection,
+ * the connection inherits the timeout from the pool.
+ */
+ if (!evutil_timerisset(&connection->timeout))
+ evhttp_connection_set_timeout(connection, pool->timeout);
+
+ /*
+	 * if any requests are pending, schedule the first one on the new
+	 * connection.
+ */
+
+ if (TAILQ_FIRST(&pool->requests) != NULL) {
+ struct evrpc_request_wrapper *request =
+ TAILQ_FIRST(&pool->requests);
+ TAILQ_REMOVE(&pool->requests, request, next);
+ evrpc_schedule_request(connection, request);
+ }
+}
+
+void
+evrpc_pool_remove_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *connection)
+{
+ TAILQ_REMOVE(&pool->connections, connection, next);
+}
+
+void
+evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
+{
+ struct evhttp_connection *evcon;
+ TAILQ_FOREACH(evcon, &pool->connections, next) {
+ evhttp_connection_set_timeout(evcon, timeout_in_secs);
+ }
+ pool->timeout = timeout_in_secs;
+}
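+/*
+ * Illustrative sketch, not part of the original source: typical client-side
+ * setup.  The ev_base variable and the address/port are hypothetical.
+ *
+ *	struct evrpc_pool *pool = evrpc_pool_new(ev_base);
+ *	struct evhttp_connection *conn =
+ *	    evhttp_connection_base_new(ev_base, NULL, "127.0.0.1", 8080);
+ *	evrpc_pool_add_connection(pool, conn);
+ *	evrpc_pool_set_timeout(pool, 30);
+ *
+ * Requests made afterwards (e.g. via evrpc_make_request()) are scheduled on
+ * whichever pooled connection is idle.
+ */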
+
+
+static void evrpc_reply_done(struct evhttp_request *, void *);
+static void evrpc_request_timeout(evutil_socket_t, short, void *);
+
+/*
+ * Finds a connection object associated with the pool that is currently
+ * idle and can be used to make a request.
+ */
+static struct evhttp_connection *
+evrpc_pool_find_connection(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ TAILQ_FOREACH(connection, &pool->connections, next) {
+ if (TAILQ_FIRST(&connection->requests) == NULL)
+ return (connection);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Prototypes responsible for evrpc scheduling and hooking
+ */
+
+static void evrpc_schedule_request_closure(void *ctx, enum EVRPC_HOOK_RESULT);
+
+/*
+ * We assume that the ctx is no longer queued on the pool.
+ */
+static int
+evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx)
+{
+ struct evhttp_request *req = NULL;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+
+ if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
+ goto error;
+
+ /* serialize the request data into the output buffer */
+ ctx->request_marshal(req->output_buffer, ctx->request);
+
+ /* we need to know the connection that we might have to abort */
+ ctx->evcon = connection;
+
+ /* if we get paused we also need to know the request */
+ ctx->req = req;
+
+ if (TAILQ_FIRST(&pool->output_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&ctx->hook_meta, connection);
+
+ /* apply hooks to the outgoing request */
+ hook_res = evrpc_process_hooks(&pool->output_hooks,
+ ctx, req, req->output_buffer);
+
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ /* we need to be explicitly resumed */
+ if (evrpc_pause_request(pool, ctx,
+ evrpc_schedule_request_closure) == -1)
+ goto error;
+ return (0);
+ case EVRPC_CONTINUE:
+ /* we can just continue */
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_schedule_request_closure(ctx, EVRPC_CONTINUE);
+ return (0);
+
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+ evrpc_request_wrapper_free(ctx);
+ return (-1);
+}
+
+static void
+evrpc_schedule_request_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_connection *connection = ctx->evcon;
+ struct evhttp_request *req = ctx->req;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ char *uri = NULL;
+ int res = 0;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ uri = evrpc_construct_uri(ctx->name);
+ if (uri == NULL)
+ goto error;
+
+ if (pool->timeout > 0) {
+ /*
+ * a timeout after which the whole rpc is going to be aborted.
+ */
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = pool->timeout;
+ evtimer_add(&ctx->ev_timeout, &tv);
+ }
+
+ /* start the request over the connection */
+ res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
+ mm_free(uri);
+
+ if (res == -1)
+ goto error;
+
+ return;
+
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+ evrpc_request_wrapper_free(ctx);
+}
+
+/* we just queue the paused request on the base/pool, keyed by its ctx */
+static int
+evrpc_pause_request(void *vbase, void *ctx,
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT))
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_ctx *pause = mm_malloc(sizeof(*pause));
+ if (pause == NULL)
+ return (-1);
+
+ pause->ctx = ctx;
+ pause->cb = cb;
+
+ TAILQ_INSERT_TAIL(&base->pause_requests, pause, next);
+ return (0);
+}
+
+int
+evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_pause_list *head = &base->pause_requests;
+ struct evrpc_hook_ctx *pause;
+
+ TAILQ_FOREACH(pause, head, next) {
+ if (pause->ctx == ctx)
+ break;
+ }
+
+ if (pause == NULL)
+ return (-1);
+
+ (*pause->cb)(pause->ctx, res);
+ TAILQ_REMOVE(head, pause, next);
+ mm_free(pause);
+ return (0);
+}
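+/*
+ * Illustrative sketch, not part of the original source: a hook defers
+ * processing by returning EVRPC_PAUSE and remembering its ctx pointer; once
+ * the asynchronous work is done, the request is resumed with the same
+ * base/pool and ctx.  remember_for_later() and my_base are hypothetical.
+ *
+ *	// inside a hook callback
+ *	remember_for_later(ctx);
+ *	return (EVRPC_PAUSE);
+ *
+ *	// later, from another callback
+ *	evrpc_resume_request(my_base, ctx, EVRPC_CONTINUE);
+ */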
+
+int
+evrpc_make_request(struct evrpc_request_wrapper *ctx)
+{
+ struct evrpc_pool *pool = ctx->pool;
+
+ /* initialize the event structure for this rpc */
+ evtimer_assign(&ctx->ev_timeout, pool->base, evrpc_request_timeout, ctx);
+
+ /* we better have some available connections on the pool */
+ EVUTIL_ASSERT(TAILQ_FIRST(&pool->connections) != NULL);
+
+ /*
+	 * if no connection is available, we queue the request on the pool;
+	 * the next time a connection becomes idle, the rpc will be sent on it.
+ */
+ TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
+
+ evrpc_pool_schedule(pool);
+
+ return (0);
+}
+
+
+struct evrpc_request_wrapper *
+evrpc_make_request_ctx(
+ struct evrpc_pool *pool, void *request, void *reply,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer*, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *),
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cbarg)
+{
+ struct evrpc_request_wrapper *ctx = (struct evrpc_request_wrapper *)
+ mm_malloc(sizeof(struct evrpc_request_wrapper));
+ if (ctx == NULL)
+ return (NULL);
+
+ ctx->pool = pool;
+ ctx->hook_meta = NULL;
+ ctx->evcon = NULL;
+ ctx->name = mm_strdup(rpcname);
+ if (ctx->name == NULL) {
+ mm_free(ctx);
+ return (NULL);
+ }
+ ctx->cb = cb;
+ ctx->cb_arg = cbarg;
+ ctx->request = request;
+ ctx->reply = reply;
+ ctx->request_marshal = req_marshal;
+ ctx->reply_clear = rpl_clear;
+ ctx->reply_unmarshal = rpl_unmarshal;
+
+ return (ctx);
+}
+
+static void
+evrpc_reply_done_closure(void *, enum EVRPC_HOOK_RESULT);
+
+static void
+evrpc_reply_done(struct evhttp_request *req, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evrpc_pool *pool = ctx->pool;
+ int hook_res = EVRPC_CONTINUE;
+
+ /* cancel any timeout we might have scheduled */
+ event_del(&ctx->ev_timeout);
+
+ ctx->req = req;
+
+ /* we need to get the reply now */
+ if (req == NULL) {
+ evrpc_reply_done_closure(ctx, EVRPC_CONTINUE);
+ return;
+ }
+
+ if (TAILQ_FIRST(&pool->input_hooks) != NULL) {
+ evrpc_hook_associate_meta_(&ctx->hook_meta, ctx->evcon);
+
+ /* apply hooks to the incoming request */
+ hook_res = evrpc_process_hooks(&pool->input_hooks,
+ ctx, req, req->input_buffer);
+
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ case EVRPC_CONTINUE:
+ break;
+ case EVRPC_PAUSE:
+ /*
+ * if we get paused we also need to know the
+ * request. unfortunately, the underlying
+ * layer is going to free it. we need to
+ * request ownership explicitly
+ */
+ if (req != NULL)
+ evhttp_request_own(req);
+
+ evrpc_pause_request(pool, ctx,
+ evrpc_reply_done_closure);
+ return;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_reply_done_closure(ctx, hook_res);
+
+ /* http request is being freed by underlying layer */
+}
+
+static void
+evrpc_reply_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_request *req = ctx->req;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ int res = -1;
+
+ memset(&status, 0, sizeof(status));
+ status.http_req = req;
+
+ /* we need to get the reply now */
+ if (req == NULL) {
+ status.error = EVRPC_STATUS_ERR_TIMEOUT;
+ } else if (hook_res == EVRPC_TERMINATE) {
+ status.error = EVRPC_STATUS_ERR_HOOKABORTED;
+ } else {
+ res = ctx->reply_unmarshal(ctx->reply, req->input_buffer);
+ if (res == -1)
+ status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
+ }
+
+ if (res == -1) {
+ /* clear everything that we might have written previously */
+ ctx->reply_clear(ctx->reply);
+ }
+
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+
+ evrpc_request_wrapper_free(ctx);
+
+ /* the http layer owned the original request structure, but if we
+ * got paused, we asked for ownership and need to free it here. */
+ if (req != NULL && evhttp_request_is_owned(req))
+ evhttp_request_free(req);
+
+ /* see if we can schedule another request */
+ evrpc_pool_schedule(pool);
+}
+
+static void
+evrpc_pool_schedule(struct evrpc_pool *pool)
+{
+ struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
+ struct evhttp_connection *evcon;
+
+ /* if no requests are pending, we have no work */
+ if (ctx == NULL)
+ return;
+
+ if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, ctx, next);
+ evrpc_schedule_request(evcon, ctx);
+ }
+}
+
+static void
+evrpc_request_timeout(evutil_socket_t fd, short what, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_connection *evcon = ctx->evcon;
+ EVUTIL_ASSERT(evcon != NULL);
+
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT);
+}
+
+/*
+ * frees any meta data associated with a request.
+ */
+
+static void
+evrpc_meta_data_free(struct evrpc_meta_list *meta_data)
+{
+ struct evrpc_meta *entry;
+ EVUTIL_ASSERT(meta_data != NULL);
+
+ while ((entry = TAILQ_FIRST(meta_data)) != NULL) {
+ TAILQ_REMOVE(meta_data, entry, next);
+ mm_free(entry->key);
+ mm_free(entry->data);
+ mm_free(entry);
+ }
+}
+
+static struct evrpc_hook_meta *
+evrpc_hook_meta_new_(void)
+{
+ struct evrpc_hook_meta *ctx;
+ ctx = mm_malloc(sizeof(struct evrpc_hook_meta));
+ EVUTIL_ASSERT(ctx != NULL);
+
+ TAILQ_INIT(&ctx->meta_data);
+ ctx->evcon = NULL;
+
+ return (ctx);
+}
+
+static void
+evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx,
+ struct evhttp_connection *evcon)
+{
+ struct evrpc_hook_meta *ctx = *pctx;
+ if (ctx == NULL)
+ *pctx = ctx = evrpc_hook_meta_new_();
+ ctx->evcon = evcon;
+}
+
+static void
+evrpc_hook_context_free_(struct evrpc_hook_meta *ctx)
+{
+ evrpc_meta_data_free(&ctx->meta_data);
+ mm_free(ctx);
+}
+
+/* Adds meta data */
+void
+evrpc_hook_add_meta(void *ctx, const char *key,
+ const void *data, size_t data_size)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ struct evrpc_hook_meta *store = NULL;
+ struct evrpc_meta *meta = NULL;
+
+ if ((store = req->hook_meta) == NULL)
+ store = req->hook_meta = evrpc_hook_meta_new_();
+
+ meta = mm_malloc(sizeof(struct evrpc_meta));
+ EVUTIL_ASSERT(meta != NULL);
+ meta->key = mm_strdup(key);
+ EVUTIL_ASSERT(meta->key != NULL);
+ meta->data_size = data_size;
+ meta->data = mm_malloc(data_size);
+ EVUTIL_ASSERT(meta->data != NULL);
+ memcpy(meta->data, data, data_size);
+
+ TAILQ_INSERT_TAIL(&store->meta_data, meta, next);
+}
+
+int
+evrpc_hook_find_meta(void *ctx, const char *key, void **data, size_t *data_size)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ struct evrpc_meta *meta = NULL;
+
+ if (req->hook_meta == NULL)
+ return (-1);
+
+ TAILQ_FOREACH(meta, &req->hook_meta->meta_data, next) {
+ if (strcmp(meta->key, key) == 0) {
+ *data = meta->data;
+ *data_size = meta->data_size;
+ return (0);
+ }
+ }
+
+ return (-1);
+}
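+/*
+ * Illustrative sketch, not part of the original source: hooks can hand
+ * per-request data to later hooks through the meta store.  The "trace-id"
+ * key and the id_buf/id_len variables are hypothetical.
+ *
+ *	// in an input hook
+ *	evrpc_hook_add_meta(ctx, "trace-id", id_buf, id_len);
+ *
+ *	// in a later hook for the same request
+ *	void *data;
+ *	size_t len;
+ *	if (evrpc_hook_find_meta(ctx, "trace-id", &data, &len) == 0)
+ *		;	// data/len now point at the stored copy
+ */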
+
+struct evhttp_connection *
+evrpc_hook_get_connection(void *ctx)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ return (req->hook_meta != NULL ? req->hook_meta->evcon : NULL);
+}
+
+int
+evrpc_send_request_generic(struct evrpc_pool *pool,
+ void *request, void *reply,
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cb_arg,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer *, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *))
+{
+ struct evrpc_status status;
+ struct evrpc_request_wrapper *ctx;
+ ctx = evrpc_make_request_ctx(pool, request, reply,
+ rpcname, req_marshal, rpl_clear, rpl_unmarshal, cb, cb_arg);
+ if (ctx == NULL)
+ goto error;
+ return (evrpc_make_request(ctx));
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*(cb))(&status, request, reply, cb_arg);
+ return (-1);
+}
+
+/** Allocates an evrpc object and fills it in with the right magic */
+static struct evrpc *
+evrpc_register_object(const char *name,
+ void *(*req_new)(void*), void *req_new_arg, void (*req_free)(void *),
+ int (*req_unmarshal)(void *, struct evbuffer *),
+ void *(*rpl_new)(void*), void *rpl_new_arg, void (*rpl_free)(void *),
+ int (*rpl_complete)(void *),
+ void (*rpl_marshal)(struct evbuffer *, void *))
+{
+ struct evrpc* rpc = (struct evrpc *)mm_calloc(1, sizeof(struct evrpc));
+ if (rpc == NULL)
+ return (NULL);
+ rpc->uri = mm_strdup(name);
+ if (rpc->uri == NULL) {
+ mm_free(rpc);
+ return (NULL);
+ }
+ rpc->request_new = req_new;
+ rpc->request_new_arg = req_new_arg;
+ rpc->request_free = req_free;
+ rpc->request_unmarshal = req_unmarshal;
+ rpc->reply_new = rpl_new;
+ rpc->reply_new_arg = rpl_new_arg;
+ rpc->reply_free = rpl_free;
+ rpc->reply_complete = rpl_complete;
+ rpc->reply_marshal = rpl_marshal;
+ return (rpc);
+}
+
+int
+evrpc_register_generic(struct evrpc_base *base, const char *name,
+ void (*callback)(struct evrpc_req_generic *, void *), void *cbarg,
+ void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *),
+ int (*req_unmarshal)(void *, struct evbuffer *),
+ void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *),
+ int (*rpl_complete)(void *),
+ void (*rpl_marshal)(struct evbuffer *, void *))
+{
+ struct evrpc* rpc =
+ evrpc_register_object(name, req_new, req_new_arg, req_free, req_unmarshal,
+ rpl_new, rpl_new_arg, rpl_free, rpl_complete, rpl_marshal);
+ if (rpc == NULL)
+ return (-1);
+ evrpc_register_rpc(base, rpc,
+ (void (*)(struct evrpc_req_generic*, void *))callback, cbarg);
+ return (0);
+}
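+/*
+ * Illustrative sketch, not part of the original source: the *_new, *_free,
+ * *_unmarshal, *_complete and *_marshal arguments are normally the functions
+ * generated by event_rpcgen.py for the request and reply message types; the
+ * names below (echo_cb, msg_*, reply_*) are hypothetical.
+ *
+ *	evrpc_register_generic(base, "Echo", echo_cb, NULL,
+ *	    msg_new, NULL, msg_free, msg_unmarshal,
+ *	    reply_new, NULL, reply_free, reply_complete, reply_marshal);
+ */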
+
+/** accessors for obscure and undocumented functionality */
+struct evrpc_pool *
+evrpc_request_get_pool(struct evrpc_request_wrapper *ctx)
+{
+ return (ctx->pool);
+}
+
+void
+evrpc_request_set_pool(struct evrpc_request_wrapper *ctx,
+ struct evrpc_pool *pool)
+{
+ ctx->pool = pool;
+}
+
+void
+evrpc_request_set_cb(struct evrpc_request_wrapper *ctx,
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg),
+ void *cb_arg)
+{
+ ctx->cb = cb;
+ ctx->cb_arg = cb_arg;
+}
diff --git a/libs/libevent/src/evsignal-internal.h b/libs/libevent/src/evsignal-internal.h
new file mode 100644
index 0000000000..5cff03b525
--- /dev/null
+++ b/libs/libevent/src/evsignal-internal.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVSIGNAL_INTERNAL_H_INCLUDED_
+#define EVSIGNAL_INTERNAL_H_INCLUDED_
+
+#ifndef evutil_socket_t
+#include "event2/util.h"
+#endif
+#include <signal.h>
+
+typedef void (*ev_sighandler_t)(int);
+
+/* Data structure for the default signal-handling implementation in signal.c
+ */
+struct evsig_info {
+ /* Event watching ev_signal_pair[1] */
+ struct event ev_signal;
+ /* Socketpair used to send notifications from the signal handler */
+ evutil_socket_t ev_signal_pair[2];
+	/* True iff we've added the ev_signal event. */
+ int ev_signal_added;
+ /* Count of the number of signals we're currently watching. */
+ int ev_n_signals_added;
+
+ /* Array of previous signal handler objects before Libevent started
+ * messing with them. Used to restore old signal handlers. */
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction **sh_old;
+#else
+ ev_sighandler_t **sh_old;
+#endif
+ /* Size of sh_old. */
+ int sh_old_max;
+};
+int evsig_init_(struct event_base *);
+void evsig_dealloc_(struct event_base *);
+
+void evsig_set_base_(struct event_base *base);
+void evsig_free_globals_(void);
+
+#endif /* EVSIGNAL_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evthread-internal.h b/libs/libevent/src/evthread-internal.h
new file mode 100644
index 0000000000..efdecf81e7
--- /dev/null
+++ b/libs/libevent/src/evthread-internal.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
+#define EVTHREAD_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include "event2/thread.h"
+#include "util-internal.h"
+
+struct event_base;
+
+#ifndef _WIN32
+/* On Windows, the way we currently make DLLs, it's not allowed for us to
+ * have shared global structures. Thus, we only do the direct-call-to-function
+ * code path if we know that the local shared library system supports it.
+ */
+#define EVTHREAD_EXPOSE_STRUCTS
+#endif
+
+#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
+/* Global function pointers to lock-related functions. NULL if locking isn't
+ enabled. */
+extern struct evthread_lock_callbacks evthread_lock_fns_;
+extern struct evthread_condition_callbacks evthread_cond_fns_;
+extern unsigned long (*evthread_id_fn_)(void);
+extern int evthread_lock_debugging_enabled_;
+
+/** Return the ID of the current thread, or 1 if threading isn't enabled. */
+#define EVTHREAD_GET_ID() \
+ (evthread_id_fn_ ? evthread_id_fn_() : 1)
+
+/** Return true iff we're in the thread that is currently (or most recently)
+ * running a given event_base's loop. Requires lock. */
+#define EVBASE_IN_THREAD(base) \
+ (evthread_id_fn_ == NULL || \
+ (base)->th_owner_id == evthread_id_fn_())
+
+/** Return true iff we need to notify the base's main thread about changes to
+ * its state, because it's currently running the main loop in another
+ * thread. Requires lock. */
+#define EVBASE_NEED_NOTIFY(base) \
+ (evthread_id_fn_ != NULL && \
+ (base)->running_loop && \
+ (base)->th_owner_id != evthread_id_fn_())
+
+/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
+ NULL if locking is not enabled. */
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
+ ((lockvar) = evthread_lock_fns_.alloc ? \
+ evthread_lock_fns_.alloc(locktype) : NULL)
+
+/** Free a given lock, if it is present and locking is enabled. */
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
+ do { \
+ void *lock_tmp_ = (lockvar); \
+ if (lock_tmp_ && evthread_lock_fns_.free) \
+ evthread_lock_fns_.free(lock_tmp_, (locktype)); \
+ } while (0)
+
+/** Acquire a lock. */
+#define EVLOCK_LOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthread_lock_fns_.lock(mode, lockvar); \
+ } while (0)
+
+/** Release a lock */
+#define EVLOCK_UNLOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthread_lock_fns_.unlock(mode, lockvar); \
+ } while (0)
+
+/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
+ do { \
+ if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
+ void *tmp = lockvar1; \
+ lockvar1 = lockvar2; \
+ lockvar2 = tmp; \
+ } \
+ } while (0)
+
+/** Lock an event_base, if it is set up for locking. Acquires the lock
+ in the base structure whose field is named 'lockvar'. */
+#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
+ EVLOCK_LOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** Unlock an event_base, if it is set up for locking. */
+#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
+ EVLOCK_UNLOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
+ * locked and held by us. */
+#define EVLOCK_ASSERT_LOCKED(lock) \
+ do { \
+ if ((lock) && evthread_lock_debugging_enabled_) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
+ } \
+ } while (0)
+
+/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
+ * manage to get it. */
+static inline int EVLOCK_TRY_LOCK_(void *lock);
+static inline int
+EVLOCK_TRY_LOCK_(void *lock)
+{
+ if (lock && evthread_lock_fns_.lock) {
+ int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
+ return !r;
+ } else {
+ /* Locking is disabled either globally or for this thing;
+ * of course we count as having the lock. */
+ return 1;
+ }
+}
+
+/** Allocate a new condition variable and store it in the void *, condvar */
+#define EVTHREAD_ALLOC_COND(condvar) \
+ do { \
+ (condvar) = evthread_cond_fns_.alloc_condition ? \
+ evthread_cond_fns_.alloc_condition(0) : NULL; \
+ } while (0)
+/** Deallocate and free a condition variable in condvar */
+#define EVTHREAD_FREE_COND(cond) \
+ do { \
+ if (cond) \
+ evthread_cond_fns_.free_condition((cond)); \
+ } while (0)
+/** Signal one thread waiting on cond */
+#define EVTHREAD_COND_SIGNAL(cond) \
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
+/** Signal all threads waiting on cond */
+#define EVTHREAD_COND_BROADCAST(cond) \
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
+/** Wait until the condition 'cond' is signalled. Must be called while
+ * holding 'lock'. The lock will be released until the condition is
+ * signalled, at which point it will be acquired again. Returns 0 for
+ * success, -1 for failure. */
+#define EVTHREAD_COND_WAIT(cond, lock) \
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
+/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
+ * on timeout. */
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
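+/*
+ * Illustrative sketch, not part of the original source: the usual pattern is
+ * to re-check the predicate in a loop while holding the lock.  The lock,
+ * cond and done variables are hypothetical.
+ *
+ *	EVLOCK_LOCK(lock, 0);
+ *	while (!done)
+ *		EVTHREAD_COND_WAIT(cond, lock);
+ *	EVLOCK_UNLOCK(lock, 0);
+ */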
+
+/** True iff locking functions have been configured. */
+#define EVTHREAD_LOCKING_ENABLED() \
+ (evthread_lock_fns_.lock != NULL)
+
+#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)
+
+unsigned long evthreadimpl_get_id_(void);
+int evthreadimpl_is_lock_debugging_enabled_(void);
+void *evthreadimpl_lock_alloc_(unsigned locktype);
+void evthreadimpl_lock_free_(void *lock, unsigned locktype);
+int evthreadimpl_lock_lock_(unsigned mode, void *lock);
+int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
+void *evthreadimpl_cond_alloc_(unsigned condtype);
+void evthreadimpl_cond_free_(void *cond);
+int evthreadimpl_cond_signal_(void *cond, int broadcast);
+int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
+int evthreadimpl_locking_enabled_(void);
+
+#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
+#define EVBASE_IN_THREAD(base) \
+ ((base)->th_owner_id == evthreadimpl_get_id_())
+#define EVBASE_NEED_NOTIFY(base) \
+ ((base)->running_loop && \
+ ((base)->th_owner_id != evthreadimpl_get_id_()))
+
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
+ ((lockvar) = evthreadimpl_lock_alloc_(locktype))
+
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
+ do { \
+ void *lock_tmp_ = (lockvar); \
+ if (lock_tmp_) \
+ evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
+ } while (0)
+
+/** Acquire a lock. */
+#define EVLOCK_LOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthreadimpl_lock_lock_(mode, lockvar); \
+ } while (0)
+
+/** Release a lock */
+#define EVLOCK_UNLOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthreadimpl_lock_unlock_(mode, lockvar); \
+ } while (0)
+
+/** Lock an event_base, if it is set up for locking. Acquires the lock
+ in the base structure whose field is named 'lockvar'. */
+#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
+ EVLOCK_LOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** Unlock an event_base, if it is set up for locking. */
+#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
+ EVLOCK_UNLOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
+ * locked and held by us. */
+#define EVLOCK_ASSERT_LOCKED(lock) \
+ do { \
+ if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
+ } \
+ } while (0)
+
+/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
+ * manage to get it. */
+static inline int EVLOCK_TRY_LOCK_(void *lock);
+static inline int
+EVLOCK_TRY_LOCK_(void *lock)
+{
+ if (lock) {
+ int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
+ return !r;
+ } else {
+ /* Locking is disabled either globally or for this thing;
+ * of course we count as having the lock. */
+ return 1;
+ }
+}
+
+/** Allocate a new condition variable and store it in the void *, condvar */
+#define EVTHREAD_ALLOC_COND(condvar) \
+ do { \
+ (condvar) = evthreadimpl_cond_alloc_(0); \
+ } while (0)
+/** Deallocate and free a condition variable in condvar */
+#define EVTHREAD_FREE_COND(cond) \
+ do { \
+ if (cond) \
+ evthreadimpl_cond_free_((cond)); \
+ } while (0)
+/** Signal one thread waiting on cond */
+#define EVTHREAD_COND_SIGNAL(cond) \
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
+/** Signal all threads waiting on cond */
+#define EVTHREAD_COND_BROADCAST(cond) \
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
+/** Wait until the condition 'cond' is signalled. Must be called while
+ * holding 'lock'. The lock will be released until the condition is
+ * signalled, at which point it will be acquired again. Returns 0 for
+ * success, -1 for failure. */
+#define EVTHREAD_COND_WAIT(cond, lock) \
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
+/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
+ * on timeout. */
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )
+
+#define EVTHREAD_LOCKING_ENABLED() \
+ (evthreadimpl_locking_enabled_())
+
+#else /* EVENT__DISABLE_THREAD_SUPPORT */
+
+#define EVTHREAD_GET_ID() 1
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
+
+#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
+
+#define EVBASE_IN_THREAD(base) 1
+#define EVBASE_NEED_NOTIFY(base) 0
+#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_
+
+#define EVLOCK_TRY_LOCK_(lock) 1
+
+#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_
+
+#define EVTHREAD_LOCKING_ENABLED() 0
+
+#endif
+
+/* This code is shared between both lock impls */
+#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
+/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
+ do { \
+ if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
+ void *tmp = lockvar1; \
+ lockvar1 = lockvar2; \
+ lockvar2 = tmp; \
+ } \
+ } while (0)
+
+/** Acquire both lock1 and lock2. Always allocates locks in the same order,
+ * so that two threads locking two locks with LOCK2 will not deadlock. */
+#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
+ do { \
+ void *lock1_tmplock_ = (lock1); \
+ void *lock2_tmplock_ = (lock2); \
+ EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
+ EVLOCK_LOCK(lock1_tmplock_,mode1); \
+ if (lock2_tmplock_ != lock1_tmplock_) \
+ EVLOCK_LOCK(lock2_tmplock_,mode2); \
+ } while (0)
+/** Release both lock1 and lock2. */
+#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
+ do { \
+ void *lock1_tmplock_ = (lock1); \
+ void *lock2_tmplock_ = (lock2); \
+ EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
+ if (lock2_tmplock_ != lock1_tmplock_) \
+ EVLOCK_UNLOCK(lock2_tmplock_,mode2); \
+ EVLOCK_UNLOCK(lock1_tmplock_,mode1); \
+ } while (0)
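+/*
+ * Illustrative sketch, not part of the original source: because the locks
+ * are sorted by address before being taken, two threads may pass the same
+ * pair of locks in either order without deadlocking.  lock_a and lock_b are
+ * hypothetical.
+ *
+ *	EVLOCK_LOCK2(lock_a, lock_b, 0, 0);
+ *	// ... touch state guarded by both locks ...
+ *	EVLOCK_UNLOCK2(lock_a, lock_b, 0, 0);
+ */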
+
+int evthread_is_debug_lock_held_(void *lock);
+void *evthread_debug_get_real_lock_(void *lock);
+
+void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
+ int enable_locks);
+
+#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
+ do { \
+ lockvar = evthread_setup_global_lock_(lockvar, \
+ (locktype), enable_locks); \
+ if (!lockvar) { \
+ event_warn("Couldn't allocate %s", #lockvar); \
+ return -1; \
+ } \
+	} while (0)
+
+int event_global_setup_locks_(const int enable_locks);
+int evsig_global_setup_locks_(const int enable_locks);
+int evutil_global_setup_locks_(const int enable_locks);
+int evutil_secure_rng_global_setup_locks_(const int enable_locks);
+
+/** Return current evthread_lock_callbacks */
+struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
+/** Return current evthread_condition_callbacks */
+struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
+/** Disable locking for internal usage (like global shutdown) */
+void evthreadimpl_disable_lock_debugging_(void);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evthread.c b/libs/libevent/src/evthread.c
new file mode 100644
index 0000000000..f3f1eddc89
--- /dev/null
+++ b/libs/libevent/src/evthread.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+
+#include "event2/thread.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+
+#ifdef EVTHREAD_EXPOSE_STRUCTS
+#define GLOBAL
+#else
+#define GLOBAL static
+#endif
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+extern int event_debug_created_threadable_ctx_;
+extern int event_debug_mode_on_;
+#endif
+
+/* globals */
+GLOBAL int evthread_lock_debugging_enabled_ = 0;
+GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
+ 0, 0, NULL, NULL, NULL, NULL
+};
+GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
+GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
+ 0, NULL, NULL, NULL, NULL
+};
+
+/* Used for debugging */
+static struct evthread_lock_callbacks original_lock_fns_ = {
+ 0, 0, NULL, NULL, NULL, NULL
+};
+static struct evthread_condition_callbacks original_cond_fns_ = {
+ 0, NULL, NULL, NULL, NULL
+};
+
+void
+evthread_set_id_callback(unsigned long (*id_fn)(void))
+{
+ evthread_id_fn_ = id_fn;
+}
+
+struct evthread_lock_callbacks *evthread_get_lock_callbacks()
+{
+ return evthread_lock_debugging_enabled_
+ ? &original_lock_fns_ : &evthread_lock_fns_;
+}
+struct evthread_condition_callbacks *evthread_get_condition_callbacks()
+{
+ return evthread_lock_debugging_enabled_
+ ? &original_cond_fns_ : &evthread_cond_fns_;
+}
+void evthreadimpl_disable_lock_debugging_(void)
+{
+ evthread_lock_debugging_enabled_ = 0;
+}
+
+int
+evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
+{
+ struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ if (event_debug_created_threadable_ctx_) {
+ event_errx(1, "evthread initialization must be called BEFORE anything else!");
+ }
+ }
+#endif
+
+ if (!cbs) {
+ if (target->alloc)
+ event_warnx("Trying to disable lock functions after "
+			    "they have been set up will probably not work.");
+ memset(target, 0, sizeof(evthread_lock_fns_));
+ return 0;
+ }
+ if (target->alloc) {
+ /* Uh oh; we already had locking callbacks set up.*/
+ if (target->lock_api_version == cbs->lock_api_version &&
+ target->supported_locktypes == cbs->supported_locktypes &&
+ target->alloc == cbs->alloc &&
+ target->free == cbs->free &&
+ target->lock == cbs->lock &&
+ target->unlock == cbs->unlock) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change lock callbacks once they have been "
+ "initialized.");
+ return -1;
+ }
+ if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
+ memcpy(target, cbs, sizeof(evthread_lock_fns_));
+ return event_global_setup_locks_(1);
+ } else {
+ return -1;
+ }
+}
+
+int
+evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
+{
+ struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ if (event_debug_created_threadable_ctx_) {
+ event_errx(1, "evthread initialization must be called BEFORE anything else!");
+ }
+ }
+#endif
+
+ if (!cbs) {
+ if (target->alloc_condition)
+ event_warnx("Trying to disable condition functions "
+			    "after they have been set up will probably not "
+ "work.");
+ memset(target, 0, sizeof(evthread_cond_fns_));
+ return 0;
+ }
+ if (target->alloc_condition) {
+ /* Uh oh; we already had condition callbacks set up.*/
+ if (target->condition_api_version == cbs->condition_api_version &&
+ target->alloc_condition == cbs->alloc_condition &&
+ target->free_condition == cbs->free_condition &&
+ target->signal_condition == cbs->signal_condition &&
+ target->wait_condition == cbs->wait_condition) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change condition callbacks once they "
+ "have been initialized.");
+ return -1;
+ }
+ if (cbs->alloc_condition && cbs->free_condition &&
+ cbs->signal_condition && cbs->wait_condition) {
+ memcpy(target, cbs, sizeof(evthread_cond_fns_));
+ }
+ if (evthread_lock_debugging_enabled_) {
+ evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
+ evthread_cond_fns_.free_condition = cbs->free_condition;
+ evthread_cond_fns_.signal_condition = cbs->signal_condition;
+ }
+ return 0;
+}
+
+#define DEBUG_LOCK_SIG 0xdeb0b10c
+
+struct debug_lock {
+ unsigned signature;
+ unsigned locktype;
+ unsigned long held_by;
+ /* XXXX if we ever use read-write locks, we will need a separate
+ * lock to protect count. */
+ int count;
+ void *lock;
+};
+
+static void *
+debug_lock_alloc(unsigned locktype)
+{
+ struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
+ if (!result)
+ return NULL;
+ if (original_lock_fns_.alloc) {
+ if (!(result->lock = original_lock_fns_.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
+ mm_free(result);
+ return NULL;
+ }
+ } else {
+ result->lock = NULL;
+ }
+ result->signature = DEBUG_LOCK_SIG;
+ result->locktype = locktype;
+ result->count = 0;
+ result->held_by = 0;
+ return result;
+}
+
+static void
+debug_lock_free(void *lock_, unsigned locktype)
+{
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(lock->count == 0);
+ EVUTIL_ASSERT(locktype == lock->locktype);
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ if (original_lock_fns_.free) {
+ original_lock_fns_.free(lock->lock,
+ lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ }
+ lock->lock = NULL;
+ lock->count = -100;
+ lock->signature = 0x12300fda;
+ mm_free(lock);
+}
+
+static void
+evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
+{
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ ++lock->count;
+ if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
+ EVUTIL_ASSERT(lock->count == 1);
+ if (evthread_id_fn_) {
+ unsigned long me;
+ me = evthread_id_fn_();
+ if (lock->count > 1)
+ EVUTIL_ASSERT(lock->held_by == me);
+ lock->held_by = me;
+ }
+}
+
+static int
+debug_lock_lock(unsigned mode, void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ int res = 0;
+ if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+ EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+ else
+ EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+ if (original_lock_fns_.lock)
+ res = original_lock_fns_.lock(mode, lock->lock);
+ if (!res) {
+ evthread_debug_lock_mark_locked(mode, lock);
+ }
+ return res;
+}
+
+static void
+evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
+{
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+ EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+ else
+ EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+ if (evthread_id_fn_) {
+ unsigned long me;
+ me = evthread_id_fn_();
+ EVUTIL_ASSERT(lock->held_by == me);
+ if (lock->count == 1)
+ lock->held_by = 0;
+ }
+ --lock->count;
+ EVUTIL_ASSERT(lock->count >= 0);
+}
+
+static int
+debug_lock_unlock(unsigned mode, void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ int res = 0;
+ evthread_debug_lock_mark_unlocked(mode, lock);
+ if (original_lock_fns_.unlock)
+ res = original_lock_fns_.unlock(mode, lock->lock);
+ return res;
+}
+
+static int
+debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ int r;
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(lock);
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ EVLOCK_ASSERT_LOCKED(lock_);
+ evthread_debug_lock_mark_unlocked(0, lock);
+ r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
+ evthread_debug_lock_mark_locked(0, lock);
+ return r;
+}
+
+/* misspelled version for backward compatibility */
+void
+evthread_enable_lock_debuging(void)
+{
+ evthread_enable_lock_debugging();
+}
+
+void
+evthread_enable_lock_debugging(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ debug_lock_alloc,
+ debug_lock_free,
+ debug_lock_lock,
+ debug_lock_unlock
+ };
+ if (evthread_lock_debugging_enabled_)
+ return;
+ memcpy(&original_lock_fns_, &evthread_lock_fns_,
+ sizeof(struct evthread_lock_callbacks));
+ memcpy(&evthread_lock_fns_, &cbs,
+ sizeof(struct evthread_lock_callbacks));
+
+ memcpy(&original_cond_fns_, &evthread_cond_fns_,
+ sizeof(struct evthread_condition_callbacks));
+ evthread_cond_fns_.wait_condition = debug_cond_wait;
+ evthread_lock_debugging_enabled_ = 1;
+
+ /* XXX return value should get checked. */
+ event_global_setup_locks_(0);
+}
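+/*
+ * Illustrative sketch, not part of the original source: lock debugging must
+ * be switched on before any locks are created, so it is typically done at
+ * startup together with the threading setup, e.g.:
+ *
+ *	evthread_use_pthreads();
+ *	evthread_enable_lock_debugging();
+ *	base = event_base_new();
+ */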
+
+int
+evthread_is_debug_lock_held_(void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ if (! lock->count)
+ return 0;
+ if (evthread_id_fn_) {
+ unsigned long me = evthread_id_fn_();
+ if (lock->held_by != me)
+ return 0;
+ }
+ return 1;
+}
+
+void *
+evthread_debug_get_real_lock_(void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ return lock->lock;
+}
+
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+ /* there are four cases here:
+ 1) we're turning on debugging; locking is not on.
+ 2) we're turning on debugging; locking is on.
+ 3) we're turning on locking; debugging is not on.
+ 4) we're turning on locking; debugging is on. */
+
+ if (!enable_locks && original_lock_fns_.alloc == NULL) {
+ /* Case 1: allocate a debug lock. */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return debug_lock_alloc(locktype);
+ } else if (!enable_locks && original_lock_fns_.alloc != NULL) {
+ /* Case 2: wrap the lock in a debug lock. */
+ struct debug_lock *lock;
+ EVUTIL_ASSERT(lock_ != NULL);
+
+ if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+ /* We can't wrap it: We need a recursive lock */
+ original_lock_fns_.free(lock_, locktype);
+ return debug_lock_alloc(locktype);
+ }
+ lock = mm_malloc(sizeof(struct debug_lock));
+ if (!lock) {
+ original_lock_fns_.free(lock_, locktype);
+ return NULL;
+ }
+ lock->lock = lock_;
+ lock->locktype = locktype;
+ lock->count = 0;
+ lock->held_by = 0;
+ return lock;
+ } else if (enable_locks && ! evthread_lock_debugging_enabled_) {
+ /* Case 3: allocate a regular lock */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return evthread_lock_fns_.alloc(locktype);
+ } else {
+ /* Case 4: Fill in a debug lock with a real lock */
+ struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
+ EVUTIL_ASSERT(enable_locks &&
+ evthread_lock_debugging_enabled_);
+ EVUTIL_ASSERT(lock->locktype == locktype);
+ if (!lock->lock) {
+ lock->lock = original_lock_fns_.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock->lock) {
+ lock->count = -200;
+ mm_free(lock);
+ return NULL;
+ }
+ }
+ return lock;
+ }
+}
+
+
+#ifndef EVTHREAD_EXPOSE_STRUCTS
+unsigned long
+evthreadimpl_get_id_()
+{
+ return evthread_id_fn_ ? evthread_id_fn_() : 1;
+}
+void *
+evthreadimpl_lock_alloc_(unsigned locktype)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ event_debug_created_threadable_ctx_ = 1;
+ }
+#endif
+
+ return evthread_lock_fns_.alloc ?
+ evthread_lock_fns_.alloc(locktype) : NULL;
+}
+void
+evthreadimpl_lock_free_(void *lock, unsigned locktype)
+{
+ if (evthread_lock_fns_.free)
+ evthread_lock_fns_.free(lock, locktype);
+}
+int
+evthreadimpl_lock_lock_(unsigned mode, void *lock)
+{
+ if (evthread_lock_fns_.lock)
+ return evthread_lock_fns_.lock(mode, lock);
+ else
+ return 0;
+}
+int
+evthreadimpl_lock_unlock_(unsigned mode, void *lock)
+{
+ if (evthread_lock_fns_.unlock)
+ return evthread_lock_fns_.unlock(mode, lock);
+ else
+ return 0;
+}
+void *
+evthreadimpl_cond_alloc_(unsigned condtype)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ event_debug_created_threadable_ctx_ = 1;
+ }
+#endif
+
+ return evthread_cond_fns_.alloc_condition ?
+ evthread_cond_fns_.alloc_condition(condtype) : NULL;
+}
+void
+evthreadimpl_cond_free_(void *cond)
+{
+ if (evthread_cond_fns_.free_condition)
+ evthread_cond_fns_.free_condition(cond);
+}
+int
+evthreadimpl_cond_signal_(void *cond, int broadcast)
+{
+ if (evthread_cond_fns_.signal_condition)
+ return evthread_cond_fns_.signal_condition(cond, broadcast);
+ else
+ return 0;
+}
+int
+evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
+{
+ if (evthread_cond_fns_.wait_condition)
+ return evthread_cond_fns_.wait_condition(cond, lock, tv);
+ else
+ return 0;
+}
+int
+evthreadimpl_is_lock_debugging_enabled_(void)
+{
+ return evthread_lock_debugging_enabled_;
+}
+
+int
+evthreadimpl_locking_enabled_(void)
+{
+ return evthread_lock_fns_.lock != NULL;
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/evthread_win32.c b/libs/libevent/src/evthread_win32.c
new file mode 100644
index 0000000000..2ec80560a5
--- /dev/null
+++ b/libs/libevent/src/evthread_win32.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#include <sys/locking.h>
+#endif
+
+struct event_base;
+#include "event2/thread.h"
+
+#include "mm-internal.h"
+#include "evthread-internal.h"
+#include "time-internal.h"
+
+#define SPIN_COUNT 2000
+
+static void *
+evthread_win32_lock_create(unsigned locktype)
+{
+ CRITICAL_SECTION *lock = mm_malloc(sizeof(CRITICAL_SECTION));
+ if (!lock)
+ return NULL;
+ if (InitializeCriticalSectionAndSpinCount(lock, SPIN_COUNT) == 0) {
+ mm_free(lock);
+ return NULL;
+ }
+ return lock;
+}
+
+static void
+evthread_win32_lock_free(void *lock_, unsigned locktype)
+{
+ CRITICAL_SECTION *lock = lock_;
+ DeleteCriticalSection(lock);
+ mm_free(lock);
+}
+
+static int
+evthread_win32_lock(unsigned mode, void *lock_)
+{
+ CRITICAL_SECTION *lock = lock_;
+ if ((mode & EVTHREAD_TRY)) {
+ return ! TryEnterCriticalSection(lock);
+ } else {
+ EnterCriticalSection(lock);
+ return 0;
+ }
+}
+
+static int
+evthread_win32_unlock(unsigned mode, void *lock_)
+{
+ CRITICAL_SECTION *lock = lock_;
+ LeaveCriticalSection(lock);
+ return 0;
+}
+
+static unsigned long
+evthread_win32_get_id(void)
+{
+ return (unsigned long) GetCurrentThreadId();
+}
+
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+static void WINAPI (*InitializeConditionVariable_fn)(PCONDITION_VARIABLE)
+ = NULL;
+static BOOL WINAPI (*SleepConditionVariableCS_fn)(
+ PCONDITION_VARIABLE, PCRITICAL_SECTION, DWORD) = NULL;
+static void WINAPI (*WakeAllConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
+static void WINAPI (*WakeConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
+
+static int
+evthread_win32_condvar_init(void)
+{
+ HANDLE lib;
+
+ lib = GetModuleHandle(TEXT("kernel32.dll"));
+ if (lib == NULL)
+ return 0;
+
+#define LOAD(name) \
+ name##_fn = GetProcAddress(lib, #name)
+ LOAD(InitializeConditionVariable);
+ LOAD(SleepConditionVariableCS);
+ LOAD(WakeAllConditionVariable);
+ LOAD(WakeConditionVariable);
+
+ return InitializeConditionVariable_fn && SleepConditionVariableCS_fn &&
+ WakeAllConditionVariable_fn && WakeConditionVariable_fn;
+}
+
+/* XXXX Even if we can build this, we don't necessarily want to: the functions
+ * in question didn't exist before Vista, so we'd better LoadProc them. */
+static void *
+evthread_win32_condvar_alloc(unsigned condflags)
+{
+ CONDITION_VARIABLE *cond = mm_malloc(sizeof(CONDITION_VARIABLE));
+ if (!cond)
+ return NULL;
+ InitializeConditionVariable_fn(cond);
+ return cond;
+}
+
+static void
+evthread_win32_condvar_free(void *cond_)
+{
+ CONDITION_VARIABLE *cond = cond_;
+	/* There doesn't _seem_ to be a cleanup fn here... */
+ mm_free(cond);
+}
+
+static int
+evthread_win32_condvar_signal(void *cond_, int broadcast)
+{
+ CONDITION_VARIABLE *cond = cond_;
+ if (broadcast)
+ WakeAllConditionVariable_fn(cond);
+ else
+ WakeConditionVariable_fn(cond);
+ return 0;
+}
+
+static int
+evthread_win32_condvar_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ CONDITION_VARIABLE *cond = cond_;
+ CRITICAL_SECTION *lock = lock_;
+ DWORD ms, err;
+ BOOL result;
+
+ if (tv)
+ ms = evutil_tv_to_msec_(tv);
+ else
+ ms = INFINITE;
+ result = SleepConditionVariableCS_fn(cond, lock, ms);
+ if (result) {
+ if (GetLastError() == WAIT_TIMEOUT)
+ return 1;
+ else
+ return -1;
+ } else {
+ return 0;
+ }
+}
+#endif
+
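+/* Fallback condition-variable emulation used when the native
+ * CONDITION_VARIABLE API is unavailable: a manual-reset event plus counters
+ * for how many threads are waiting and how many should wake, and a
+ * generation counter so a waiter only consumes wakeups signaled after it
+ * began waiting. */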
+struct evthread_win32_cond {
+ HANDLE event;
+
+ CRITICAL_SECTION lock;
+ int n_waiting;
+ int n_to_wake;
+ int generation;
+};
+
+static void *
+evthread_win32_cond_alloc(unsigned flags)
+{
+ struct evthread_win32_cond *cond;
+ if (!(cond = mm_malloc(sizeof(struct evthread_win32_cond))))
+ return NULL;
+ if (InitializeCriticalSectionAndSpinCount(&cond->lock, SPIN_COUNT)==0) {
+ mm_free(cond);
+ return NULL;
+ }
+ if ((cond->event = CreateEvent(NULL,TRUE,FALSE,NULL)) == NULL) {
+ DeleteCriticalSection(&cond->lock);
+ mm_free(cond);
+ return NULL;
+ }
+ cond->n_waiting = cond->n_to_wake = cond->generation = 0;
+ return cond;
+}
+
+static void
+evthread_win32_cond_free(void *cond_)
+{
+ struct evthread_win32_cond *cond = cond_;
+ DeleteCriticalSection(&cond->lock);
+ CloseHandle(cond->event);
+ mm_free(cond);
+}
+
+static int
+evthread_win32_cond_signal(void *cond_, int broadcast)
+{
+ struct evthread_win32_cond *cond = cond_;
+ EnterCriticalSection(&cond->lock);
+ if (broadcast)
+ cond->n_to_wake = cond->n_waiting;
+ else
+ ++cond->n_to_wake;
+ cond->generation++;
+ SetEvent(cond->event);
+ LeaveCriticalSection(&cond->lock);
+ return 0;
+}
+
+static int
+evthread_win32_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ struct evthread_win32_cond *cond = cond_;
+ CRITICAL_SECTION *lock = lock_;
+ int generation_at_start;
+ int waiting = 1;
+ int result = -1;
+ DWORD ms = INFINITE, ms_orig = INFINITE, startTime, endTime;
+ if (tv)
+ ms_orig = ms = evutil_tv_to_msec_(tv);
+
+ EnterCriticalSection(&cond->lock);
+ ++cond->n_waiting;
+ generation_at_start = cond->generation;
+ LeaveCriticalSection(&cond->lock);
+
+ LeaveCriticalSection(lock);
+
+ startTime = GetTickCount();
+ do {
+ DWORD res;
+ res = WaitForSingleObject(cond->event, ms);
+ EnterCriticalSection(&cond->lock);
+ if (cond->n_to_wake &&
+ cond->generation != generation_at_start) {
+ --cond->n_to_wake;
+ --cond->n_waiting;
+ result = 0;
+ waiting = 0;
+ goto out;
+ } else if (res != WAIT_OBJECT_0) {
+ result = (res==WAIT_TIMEOUT) ? 1 : -1;
+ --cond->n_waiting;
+ waiting = 0;
+ goto out;
+ } else if (ms != INFINITE) {
+ endTime = GetTickCount();
+ if (startTime + ms_orig <= endTime) {
+ result = 1; /* Timeout */
+ --cond->n_waiting;
+ waiting = 0;
+ goto out;
+ } else {
+ ms = startTime + ms_orig - endTime;
+ }
+ }
+ /* If we make it here, we are still waiting. */
+ if (cond->n_to_wake == 0) {
+ /* There is nobody else who should wake up; reset
+ * the event. */
+ ResetEvent(cond->event);
+ }
+ out:
+ LeaveCriticalSection(&cond->lock);
+ } while (waiting);
+
+ EnterCriticalSection(lock);
+
+ EnterCriticalSection(&cond->lock);
+ if (!cond->n_waiting)
+ ResetEvent(cond->event);
+ LeaveCriticalSection(&cond->lock);
+
+ return result;
+}
+
+int
+evthread_use_windows_threads(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ evthread_win32_lock_create,
+ evthread_win32_lock_free,
+ evthread_win32_lock,
+ evthread_win32_unlock
+ };
+
+ struct evthread_condition_callbacks cond_cbs = {
+ EVTHREAD_CONDITION_API_VERSION,
+ evthread_win32_cond_alloc,
+ evthread_win32_cond_free,
+ evthread_win32_cond_signal,
+ evthread_win32_cond_wait
+ };
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+ struct evthread_condition_callbacks condvar_cbs = {
+ EVTHREAD_CONDITION_API_VERSION,
+ evthread_win32_condvar_alloc,
+ evthread_win32_condvar_free,
+ evthread_win32_condvar_signal,
+ evthread_win32_condvar_wait
+ };
+#endif
+
+ evthread_set_lock_callbacks(&cbs);
+ evthread_set_id_callback(evthread_win32_get_id);
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+ if (evthread_win32_condvar_init()) {
+ evthread_set_condition_callbacks(&condvar_cbs);
+ return 0;
+ }
+#endif
+ evthread_set_condition_callbacks(&cond_cbs);
+
+ return 0;
+}
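+
+/* Usage sketch (illustrative only, not part of this file): on Windows, call
+ * evthread_use_windows_threads() before creating an event_base so that the
+ * base allocates the locks it needs to be used from multiple threads.
+ *
+ *   #include <event2/thread.h>
+ *   #include <event2/event.h>
+ *
+ *   if (evthread_use_windows_threads() == 0) {
+ *       struct event_base *base = event_base_new();
+ *       ... use base from several threads ...
+ *   }
+ */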
+
diff --git a/libs/libevent/src/evutil.c b/libs/libevent/src/evutil.c
new file mode 100644
index 0000000000..495bfcc029
--- /dev/null
+++ b/libs/libevent/src/evutil.c
@@ -0,0 +1,2667 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#include <io.h>
+#include <tchar.h>
+#include <process.h>
+#undef _WIN32_WINNT
+/* For structs needed by GetAdaptersAddresses */
+#define _WIN32_WINNT 0x0501
+#include <iphlpapi.h>
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef EVENT__HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_TCP_H
+#include <netinet/tcp.h>
+#endif
+#ifdef EVENT__HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#include <time.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_IFADDRS_H
+#include <ifaddrs.h>
+#endif
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "evthread-internal.h"
+
+#include "strlcpy-internal.h"
+#include "ipv6-internal.h"
+
+#ifdef _WIN32
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+#define open _open
+#define read _read
+#define close _close
+#ifndef fstat
+#define fstat _fstati64
+#endif
+#ifndef stat
+#define stat _stati64
+#endif
+#define mode_t int
+#endif
+
+int
+evutil_open_closeonexec_(const char *pathname, int flags, unsigned mode)
+{
+ int fd;
+
+#ifdef O_CLOEXEC
+ fd = open(pathname, flags|O_CLOEXEC, (mode_t)mode);
+ if (fd >= 0 || errno == EINVAL)
+ return fd;
+ /* If we got an EINVAL, fall through and try without O_CLOEXEC */
+#endif
+ fd = open(pathname, flags, (mode_t)mode);
+ if (fd < 0)
+ return -1;
+
+#if defined(FD_CLOEXEC)
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
+ close(fd);
+ return -1;
+ }
+#endif
+
+ return fd;
+}
+
+/**
+ Read the contents of 'filename' into a newly allocated NUL-terminated
+ string. Set *content_out to hold this string, and *len_out to hold its
+ length (not including the appended NUL). If 'is_binary', open the file in
+ binary mode.
+
+ Returns 0 on success, -1 if the open fails, and -2 for all other failures.
+
+ Used internally only; may go away in a future version.
+ */
+int
+evutil_read_file_(const char *filename, char **content_out, size_t *len_out,
+ int is_binary)
+{
+ int fd, r;
+ struct stat st;
+ char *mem;
+ size_t read_so_far=0;
+ int mode = O_RDONLY;
+
+ EVUTIL_ASSERT(content_out);
+ EVUTIL_ASSERT(len_out);
+ *content_out = NULL;
+ *len_out = 0;
+
+#ifdef O_BINARY
+ if (is_binary)
+ mode |= O_BINARY;
+#endif
+
+ fd = evutil_open_closeonexec_(filename, mode, 0);
+ if (fd < 0)
+ return -1;
+ if (fstat(fd, &st) || st.st_size < 0 ||
+ st.st_size > EV_SSIZE_MAX-1 ) {
+ close(fd);
+ return -2;
+ }
+ mem = mm_malloc((size_t)st.st_size + 1);
+ if (!mem) {
+ close(fd);
+ return -2;
+ }
+ read_so_far = 0;
+#ifdef _WIN32
+#define N_TO_READ(x) ((x) > INT_MAX) ? INT_MAX : ((int)(x))
+#else
+#define N_TO_READ(x) (x)
+#endif
+ while ((r = read(fd, mem+read_so_far, N_TO_READ(st.st_size - read_so_far))) > 0) {
+ read_so_far += r;
+ if (read_so_far >= (size_t)st.st_size)
+ break;
+ EVUTIL_ASSERT(read_so_far < (size_t)st.st_size);
+ }
+ close(fd);
+ if (r < 0) {
+ mm_free(mem);
+ return -2;
+ }
+ mem[read_so_far] = 0;
+
+ *len_out = read_so_far;
+ *content_out = mem;
+ return 0;
+}
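+
+/* Usage sketch (illustrative, not part of libevent): the buffer handed back
+ * by evutil_read_file_() is allocated with mm_malloc(), so a caller inside
+ * libevent frees it with mm_free() when done. The filename below is
+ * hypothetical.
+ *
+ *   char *content = NULL;
+ *   size_t len = 0;
+ *   if (evutil_read_file_("some.conf", &content, &len, 0) == 0) {
+ *       ... use the NUL-terminated content[0..len] ...
+ *       mm_free(content);
+ *   }
+ */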
+
+int
+evutil_socketpair(int family, int type, int protocol, evutil_socket_t fd[2])
+{
+#ifndef _WIN32
+ return socketpair(family, type, protocol, fd);
+#else
+ return evutil_ersatz_socketpair_(family, type, protocol, fd);
+#endif
+}
+
+int
+evutil_ersatz_socketpair_(int family, int type, int protocol,
+ evutil_socket_t fd[2])
+{
+ /* This code is originally from Tor. Used with permission. */
+
+ /* This socketpair does not work when localhost is down. So
+ * it's really not the same thing at all. But it's close enough
+ * for now, and really, when localhost is down sometimes, we
+ * have other problems too.
+ */
+#ifdef _WIN32
+#define ERR(e) WSA##e
+#else
+#define ERR(e) e
+#endif
+ evutil_socket_t listener = -1;
+ evutil_socket_t connector = -1;
+ evutil_socket_t acceptor = -1;
+ struct sockaddr_in listen_addr;
+ struct sockaddr_in connect_addr;
+ ev_socklen_t size;
+ int saved_errno = -1;
+ int family_test;
+
+ family_test = family != AF_INET;
+#ifdef AF_UNIX
+ family_test = family_test && (family != AF_UNIX);
+#endif
+ if (protocol || family_test) {
+ EVUTIL_SET_SOCKET_ERROR(ERR(EAFNOSUPPORT));
+ return -1;
+ }
+
+ if (!fd) {
+ EVUTIL_SET_SOCKET_ERROR(ERR(EINVAL));
+ return -1;
+ }
+
+ listener = socket(AF_INET, type, 0);
+ if (listener < 0)
+ return -1;
+ memset(&listen_addr, 0, sizeof(listen_addr));
+ listen_addr.sin_family = AF_INET;
+ listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ listen_addr.sin_port = 0; /* kernel chooses port. */
+ if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
+ == -1)
+ goto tidy_up_and_fail;
+ if (listen(listener, 1) == -1)
+ goto tidy_up_and_fail;
+
+ connector = socket(AF_INET, type, 0);
+ if (connector < 0)
+ goto tidy_up_and_fail;
+
+ memset(&connect_addr, 0, sizeof(connect_addr));
+
+ /* We want to find out the port number to connect to. */
+ size = sizeof(connect_addr);
+ if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr))
+ goto abort_tidy_up_and_fail;
+ if (connect(connector, (struct sockaddr *) &connect_addr,
+ sizeof(connect_addr)) == -1)
+ goto tidy_up_and_fail;
+
+ size = sizeof(listen_addr);
+ acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
+ if (acceptor < 0)
+ goto tidy_up_and_fail;
+ if (size != sizeof(listen_addr))
+ goto abort_tidy_up_and_fail;
+	/* Now check we are talking to ourselves by matching port and host on the
+ two sockets. */
+ if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr)
+ || listen_addr.sin_family != connect_addr.sin_family
+ || listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
+ || listen_addr.sin_port != connect_addr.sin_port)
+ goto abort_tidy_up_and_fail;
+ evutil_closesocket(listener);
+ fd[0] = connector;
+ fd[1] = acceptor;
+
+ return 0;
+
+ abort_tidy_up_and_fail:
+ saved_errno = ERR(ECONNABORTED);
+ tidy_up_and_fail:
+ if (saved_errno < 0)
+ saved_errno = EVUTIL_SOCKET_ERROR();
+ if (listener != -1)
+ evutil_closesocket(listener);
+ if (connector != -1)
+ evutil_closesocket(connector);
+ if (acceptor != -1)
+ evutil_closesocket(acceptor);
+
+ EVUTIL_SET_SOCKET_ERROR(saved_errno);
+ return -1;
+#undef ERR
+}
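+
+/* Usage sketch (illustrative, not part of libevent): evutil_socketpair()
+ * behaves like socketpair(2) where that exists and falls back to the
+ * loopback-based emulation above on Windows.
+ *
+ *   evutil_socket_t pair[2];
+ *   if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0) {
+ *       ... pair[0] and pair[1] are connected to each other ...
+ *       evutil_closesocket(pair[0]);
+ *       evutil_closesocket(pair[1]);
+ *   }
+ */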
+
+int
+evutil_make_socket_nonblocking(evutil_socket_t fd)
+{
+#ifdef _WIN32
+ {
+ unsigned long nonblocking = 1;
+ if (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) {
+		event_sock_warn(fd, "ioctlsocket(%d, FIONBIO)", (int)fd);
+ return -1;
+ }
+ }
+#else
+ {
+ int flags;
+ if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
+ event_warn("fcntl(%d, F_GETFL)", fd);
+ return -1;
+ }
+ if (!(flags & O_NONBLOCK)) {
+ if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
+ event_warn("fcntl(%d, F_SETFL)", fd);
+ return -1;
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Faster version of evutil_make_socket_nonblocking for internal use.
+ *
+ * Requires that no F_SETFL flags were previously set on the fd.
+ */
+static int
+evutil_fast_socket_nonblocking(evutil_socket_t fd)
+{
+#ifdef _WIN32
+ return evutil_make_socket_nonblocking(fd);
+#else
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
+ event_warn("fcntl(%d, F_SETFL)", fd);
+ return -1;
+ }
+ return 0;
+#endif
+}
+
+int
+evutil_make_listen_socket_reuseable(evutil_socket_t sock)
+{
+#if defined(SO_REUSEADDR) && !defined(_WIN32)
+ int one = 1;
+ /* REUSEADDR on Unix means, "don't hang on to this address after the
+ * listener is closed." On Windows, though, it means "don't keep other
+	 * processes from binding to this address while we're using it." */
+ return setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*) &one,
+ (ev_socklen_t)sizeof(one));
+#else
+ return 0;
+#endif
+}
+
+int
+evutil_make_listen_socket_reuseable_port(evutil_socket_t sock)
+{
+#if defined __linux__ && defined(SO_REUSEPORT)
+ int one = 1;
+ /* REUSEPORT on Linux 3.9+ means, "Multiple servers (processes or
+	 * threads) can bind to the same port if they each set the option." */
+ return setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (void*) &one,
+ (ev_socklen_t)sizeof(one));
+#else
+ return 0;
+#endif
+}
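+
+/* Usage sketch (illustrative, not part of libevent): a server typically
+ * applies these helpers between socket() and bind().
+ *
+ *   evutil_socket_t s = socket(AF_INET, SOCK_STREAM, 0);
+ *   evutil_make_socket_nonblocking(s);
+ *   evutil_make_socket_closeonexec(s);
+ *   evutil_make_listen_socket_reuseable(s);
+ *   ... bind(), listen(), etc. ...
+ */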
+
+int
+evutil_make_tcp_listen_socket_deferred(evutil_socket_t sock)
+{
+#if defined(EVENT__HAVE_NETINET_TCP_H) && defined(TCP_DEFER_ACCEPT)
+ int one = 1;
+
+	/* TCP_DEFER_ACCEPT tells the kernel to defer calling accept() until data
+	 * has arrived and is ready to read */
+ return setsockopt(sock, IPPROTO_TCP, TCP_DEFER_ACCEPT, &one,
+ (ev_socklen_t)sizeof(one));
+#endif
+ return 0;
+}
+
+int
+evutil_make_socket_closeonexec(evutil_socket_t fd)
+{
+#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD)
+ int flags;
+ if ((flags = fcntl(fd, F_GETFD, NULL)) < 0) {
+ event_warn("fcntl(%d, F_GETFD)", fd);
+ return -1;
+ }
+ if (!(flags & FD_CLOEXEC)) {
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) {
+ event_warn("fcntl(%d, F_SETFD)", fd);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Faster version of evutil_make_socket_closeonexec for internal use.
+ *
+ * Requires that no F_SETFD flags were previously set on the fd.
+ */
+static int
+evutil_fast_socket_closeonexec(evutil_socket_t fd)
+{
+#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD)
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
+ event_warn("fcntl(%d, F_SETFD)", fd);
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+int
+evutil_closesocket(evutil_socket_t sock)
+{
+#ifndef _WIN32
+ return close(sock);
+#else
+ return closesocket(sock);
+#endif
+}
+
+ev_int64_t
+evutil_strtoll(const char *s, char **endptr, int base)
+{
+#ifdef EVENT__HAVE_STRTOLL
+ return (ev_int64_t)strtoll(s, endptr, base);
+#elif EVENT__SIZEOF_LONG == 8
+ return (ev_int64_t)strtol(s, endptr, base);
+#elif defined(_WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
+ /* XXXX on old versions of MS APIs, we only support base
+ * 10. */
+ ev_int64_t r;
+ if (base != 10)
+ return 0;
+ r = (ev_int64_t) _atoi64(s);
+ while (isspace(*s))
+ ++s;
+ if (*s == '-')
+ ++s;
+ while (isdigit(*s))
+ ++s;
+ if (endptr)
+ *endptr = (char*) s;
+ return r;
+#elif defined(_WIN32)
+ return (ev_int64_t) _strtoi64(s, endptr, base);
+#elif defined(EVENT__SIZEOF_LONG_LONG) && EVENT__SIZEOF_LONG_LONG == 8
+ long long r;
+ int n;
+ if (base != 10 && base != 16)
+ return 0;
+ if (base == 10) {
+ n = sscanf(s, "%lld", &r);
+ } else {
+ unsigned long long ru=0;
+ n = sscanf(s, "%llx", &ru);
+ if (ru > EV_INT64_MAX)
+ return 0;
+ r = (long long) ru;
+ }
+ if (n != 1)
+ return 0;
+ while (EVUTIL_ISSPACE_(*s))
+ ++s;
+ if (*s == '-')
+ ++s;
+ if (base == 10) {
+ while (EVUTIL_ISDIGIT_(*s))
+ ++s;
+ } else {
+ while (EVUTIL_ISXDIGIT_(*s))
+ ++s;
+ }
+ if (endptr)
+ *endptr = (char*) s;
+ return r;
+#else
+#error "I don't know how to parse 64-bit integers."
+#endif
+}
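+
+/* Illustrative example (not part of libevent): evutil_strtoll() gives a
+ * portable way to parse 64-bit integers even where strtoll() is missing.
+ *
+ *   char *end = NULL;
+ *   ev_int64_t v = evutil_strtoll("9007199254740993", &end, 10);
+ */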
+
+#ifdef _WIN32
+int
+evutil_socket_geterror(evutil_socket_t sock)
+{
+ int optval, optvallen=sizeof(optval);
+ int err = WSAGetLastError();
+ if (err == WSAEWOULDBLOCK && sock >= 0) {
+ if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void*)&optval,
+ &optvallen))
+ return err;
+ if (optval)
+ return optval;
+ }
+ return err;
+}
+#endif
+
+/* XXX we should use an enum here. */
+/* 2 for connection refused, 1 for connected, 0 for not yet, -1 for error. */
+int
+evutil_socket_connect_(evutil_socket_t *fd_ptr, const struct sockaddr *sa, int socklen)
+{
+ int made_fd = 0;
+
+ if (*fd_ptr < 0) {
+ if ((*fd_ptr = socket(sa->sa_family, SOCK_STREAM, 0)) < 0)
+ goto err;
+ made_fd = 1;
+ if (evutil_make_socket_nonblocking(*fd_ptr) < 0) {
+ goto err;
+ }
+ }
+
+ if (connect(*fd_ptr, sa, socklen) < 0) {
+ int e = evutil_socket_geterror(*fd_ptr);
+ if (EVUTIL_ERR_CONNECT_RETRIABLE(e))
+ return 0;
+ if (EVUTIL_ERR_CONNECT_REFUSED(e))
+ return 2;
+ goto err;
+ } else {
+ return 1;
+ }
+
+err:
+ if (made_fd) {
+ evutil_closesocket(*fd_ptr);
+ *fd_ptr = -1;
+ }
+ return -1;
+}
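+
+/* Illustrative caller sketch (not part of libevent), following the return
+ * codes documented above:
+ *
+ *   switch (evutil_socket_connect_(&fd, sa, socklen)) {
+ *   case 1:  ... connected right away ...  break;
+ *   case 0:  ... wait for the fd to become writable, then call
+ *                evutil_socket_finished_connecting_(fd) ...  break;
+ *   case 2:  ... connection refused ...  break;
+ *   default: ... error; if this call created the fd, it was closed
+ *                and *fd_ptr reset to -1 ...  break;
+ *   }
+ */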
+
+/* Check whether a socket on which we called connect() is done
+ connecting. Return 1 for connected, 0 for not yet, -1 for error. In the
+ error case, set the current socket errno to the error that happened during
+ the connect operation. */
+int
+evutil_socket_finished_connecting_(evutil_socket_t fd)
+{
+ int e;
+ ev_socklen_t elen = sizeof(e);
+
+ if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&e, &elen) < 0)
+ return -1;
+
+ if (e) {
+ if (EVUTIL_ERR_CONNECT_RETRIABLE(e))
+ return 0;
+ EVUTIL_SET_SOCKET_ERROR(e);
+ return -1;
+ }
+
+ return 1;
+}
+
+#if (EVUTIL_AI_PASSIVE|EVUTIL_AI_CANONNAME|EVUTIL_AI_NUMERICHOST| \
+ EVUTIL_AI_NUMERICSERV|EVUTIL_AI_V4MAPPED|EVUTIL_AI_ALL| \
+ EVUTIL_AI_ADDRCONFIG) != \
+ (EVUTIL_AI_PASSIVE^EVUTIL_AI_CANONNAME^EVUTIL_AI_NUMERICHOST^ \
+ EVUTIL_AI_NUMERICSERV^EVUTIL_AI_V4MAPPED^EVUTIL_AI_ALL^ \
+ EVUTIL_AI_ADDRCONFIG)
+#error "Some of our EVUTIL_AI_* flags seem to overlap with system AI_* flags"
+#endif
+
+/* We sometimes need to know whether we have an ipv4 address and whether we
+ have an ipv6 address. If 'have_checked_interfaces', then we've already done
+ the test. If 'had_ipv4_address', then it turns out we had an ipv4 address.
+ If 'had_ipv6_address', then it turns out we had an ipv6 address. These are
+ set by evutil_check_interfaces. */
+static int have_checked_interfaces, had_ipv4_address, had_ipv6_address;
+
+/* Macro: True iff the IPv4 address 'addr', in host order, is in 127.0.0.0/8
+ */
+#define EVUTIL_V4ADDR_IS_LOCALHOST(addr) (((addr)>>24) == 127)
+
+/* Macro: True iff the IPv4 address 'addr', in host order, is a class D
+ * (multicast) address.
+ */
+#define EVUTIL_V4ADDR_IS_CLASSD(addr) ((((addr)>>24) & 0xf0) == 0xe0)
+
+static void
+evutil_found_ifaddr(const struct sockaddr *sa)
+{
+ const char ZEROES[] = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00";
+
+ if (sa->sa_family == AF_INET) {
+ const struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+ ev_uint32_t addr = ntohl(sin->sin_addr.s_addr);
+ if (addr == 0 ||
+ EVUTIL_V4ADDR_IS_LOCALHOST(addr) ||
+ EVUTIL_V4ADDR_IS_CLASSD(addr)) {
+ /* Not actually a usable external address. */
+ } else {
+ event_debug(("Detected an IPv4 interface"));
+ had_ipv4_address = 1;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+ const unsigned char *addr =
+ (unsigned char*)sin6->sin6_addr.s6_addr;
+ if (!memcmp(addr, ZEROES, 8) ||
+ ((addr[0] & 0xfe) == 0xfc) ||
+ (addr[0] == 0xfe && (addr[1] & 0xc0) == 0x80) ||
+ (addr[0] == 0xfe && (addr[1] & 0xc0) == 0xc0) ||
+ (addr[0] == 0xff)) {
+ /* This is a reserved, ipv4compat, ipv4map, loopback,
+ * link-local, multicast, or unspecified address. */
+ } else {
+ event_debug(("Detected an IPv6 interface"));
+ had_ipv6_address = 1;
+ }
+ }
+}
+
+#ifdef _WIN32
+typedef ULONG (WINAPI *GetAdaptersAddresses_fn_t)(
+ ULONG, ULONG, PVOID, PIP_ADAPTER_ADDRESSES, PULONG);
+#endif
+
+static int
+evutil_check_ifaddrs(void)
+{
+#if defined(EVENT__HAVE_GETIFADDRS)
+ /* Most free Unixy systems provide getifaddrs, which gives us a linked list
+ * of struct ifaddrs. */
+ struct ifaddrs *ifa = NULL;
+ const struct ifaddrs *i;
+ if (getifaddrs(&ifa) < 0) {
+ event_warn("Unable to call getifaddrs()");
+ return -1;
+ }
+
+ for (i = ifa; i; i = i->ifa_next) {
+ if (!i->ifa_addr)
+ continue;
+ evutil_found_ifaddr(i->ifa_addr);
+ }
+
+ freeifaddrs(ifa);
+ return 0;
+#elif defined(_WIN32)
+ /* Windows XP began to provide GetAdaptersAddresses. Windows 2000 had a
+ "GetAdaptersInfo", but that's deprecated; let's just try
+ GetAdaptersAddresses and fall back to connect+getsockname.
+ */
+	HMODULE lib = evutil_load_windows_system_library_(TEXT("iphlpapi.dll"));
+ GetAdaptersAddresses_fn_t fn;
+ ULONG size, res;
+ IP_ADAPTER_ADDRESSES *addresses = NULL, *address;
+ int result = -1;
+
+#define FLAGS (GAA_FLAG_SKIP_ANYCAST | \
+ GAA_FLAG_SKIP_MULTICAST | \
+ GAA_FLAG_SKIP_DNS_SERVER)
+
+ if (!lib)
+ goto done;
+
+ if (!(fn = (GetAdaptersAddresses_fn_t) GetProcAddress(lib, "GetAdaptersAddresses")))
+ goto done;
+
+ /* Guess how much space we need. */
+ size = 15*1024;
+ addresses = mm_malloc(size);
+ if (!addresses)
+ goto done;
+ res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size);
+ if (res == ERROR_BUFFER_OVERFLOW) {
+		/* our guess wasn't big enough; try again with the size it reported */
+ mm_free(addresses);
+ addresses = mm_malloc(size);
+ if (!addresses)
+ goto done;
+ res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size);
+ }
+ if (res != NO_ERROR)
+ goto done;
+
+ for (address = addresses; address; address = address->Next) {
+ IP_ADAPTER_UNICAST_ADDRESS *a;
+ for (a = address->FirstUnicastAddress; a; a = a->Next) {
+ /* Yes, it's a linked list inside a linked list */
+ struct sockaddr *sa = a->Address.lpSockaddr;
+ evutil_found_ifaddr(sa);
+ }
+ }
+
+ result = 0;
+done:
+ if (lib)
+ FreeLibrary(lib);
+ if (addresses)
+ mm_free(addresses);
+ return result;
+#else
+ return -1;
+#endif
+}
+
+/* Test whether we have an ipv4 interface and an ipv6 interface. Return 0 if
+ * the test seemed successful. */
+static int
+evutil_check_interfaces(int force_recheck)
+{
+ evutil_socket_t fd = -1;
+ struct sockaddr_in sin, sin_out;
+ struct sockaddr_in6 sin6, sin6_out;
+ ev_socklen_t sin_out_len = sizeof(sin_out);
+ ev_socklen_t sin6_out_len = sizeof(sin6_out);
+ int r;
+ if (have_checked_interfaces && !force_recheck)
+ return 0;
+
+ if (evutil_check_ifaddrs() == 0) {
+ /* Use a nice sane interface, if this system has one. */
+ return 0;
+ }
+
+	/* Ugh. There was no nice sane interface. So to check whether we have
+	 * an interface open for a given protocol, we will try to make a UDP
+	 * 'connection' to a remote host on the internet. We don't actually
+	 * use it, so the address doesn't matter, but we want to pick one that
+	 * keeps us from using a host- or link-local interface. */
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(53);
+ r = evutil_inet_pton(AF_INET, "18.244.0.188", &sin.sin_addr);
+ EVUTIL_ASSERT(r);
+
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(53);
+ r = evutil_inet_pton(AF_INET6, "2001:4860:b002::68", &sin6.sin6_addr);
+ EVUTIL_ASSERT(r);
+
+ memset(&sin_out, 0, sizeof(sin_out));
+ memset(&sin6_out, 0, sizeof(sin6_out));
+
+ /* XXX some errnos mean 'no address'; some mean 'not enough sockets'. */
+ if ((fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) >= 0 &&
+ connect(fd, (struct sockaddr*)&sin, sizeof(sin)) == 0 &&
+ getsockname(fd, (struct sockaddr*)&sin_out, &sin_out_len) == 0) {
+ /* We might have an IPv4 interface. */
+ evutil_found_ifaddr((struct sockaddr*) &sin_out);
+ }
+ if (fd >= 0)
+ evutil_closesocket(fd);
+
+ if ((fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP)) >= 0 &&
+ connect(fd, (struct sockaddr*)&sin6, sizeof(sin6)) == 0 &&
+ getsockname(fd, (struct sockaddr*)&sin6_out, &sin6_out_len) == 0) {
+ /* We might have an IPv6 interface. */
+ evutil_found_ifaddr((struct sockaddr*) &sin6_out);
+ }
+
+ if (fd >= 0)
+ evutil_closesocket(fd);
+
+ return 0;
+}
+
+/* Internal addrinfo flag. This one is set when we allocate the addrinfo from
+ * inside libevent. Otherwise, the built-in getaddrinfo() function allocated
+ * it, and we should trust what they said.
+ **/
+#define EVUTIL_AI_LIBEVENT_ALLOCATED 0x80000000
+
+/* Helper: construct a new addrinfo containing the socket address in
+ * 'sa', which must be a sockaddr_in or a sockaddr_in6. Take the
+ * socktype and protocol info from hints. If they weren't set, then
+ * allocate both a TCP and a UDP addrinfo.
+ */
+struct evutil_addrinfo *
+evutil_new_addrinfo_(struct sockaddr *sa, ev_socklen_t socklen,
+ const struct evutil_addrinfo *hints)
+{
+ struct evutil_addrinfo *res;
+ EVUTIL_ASSERT(hints);
+
+ if (hints->ai_socktype == 0 && hints->ai_protocol == 0) {
+ /* Indecisive user! Give them a UDP and a TCP. */
+ struct evutil_addrinfo *r1, *r2;
+ struct evutil_addrinfo tmp;
+ memcpy(&tmp, hints, sizeof(tmp));
+ tmp.ai_socktype = SOCK_STREAM; tmp.ai_protocol = IPPROTO_TCP;
+ r1 = evutil_new_addrinfo_(sa, socklen, &tmp);
+ if (!r1)
+ return NULL;
+ tmp.ai_socktype = SOCK_DGRAM; tmp.ai_protocol = IPPROTO_UDP;
+ r2 = evutil_new_addrinfo_(sa, socklen, &tmp);
+ if (!r2) {
+ evutil_freeaddrinfo(r1);
+ return NULL;
+ }
+ r1->ai_next = r2;
+ return r1;
+ }
+
+ /* We're going to allocate extra space to hold the sockaddr. */
+ res = mm_calloc(1,sizeof(struct evutil_addrinfo)+socklen);
+ if (!res)
+ return NULL;
+ res->ai_addr = (struct sockaddr*)
+ (((char*)res) + sizeof(struct evutil_addrinfo));
+ memcpy(res->ai_addr, sa, socklen);
+ res->ai_addrlen = socklen;
+ res->ai_family = sa->sa_family; /* Same or not? XXX */
+ res->ai_flags = EVUTIL_AI_LIBEVENT_ALLOCATED;
+ res->ai_socktype = hints->ai_socktype;
+ res->ai_protocol = hints->ai_protocol;
+
+ return res;
+}
+
+/* Append the addrinfo 'append' to the end of 'first', and return the start of
+ * the list. Either element can be NULL, in which case we return the element
+ * that is not NULL. */
+struct evutil_addrinfo *
+evutil_addrinfo_append_(struct evutil_addrinfo *first,
+ struct evutil_addrinfo *append)
+{
+ struct evutil_addrinfo *ai = first;
+ if (!ai)
+ return append;
+ while (ai->ai_next)
+ ai = ai->ai_next;
+ ai->ai_next = append;
+
+ return first;
+}
+
+static int
+parse_numeric_servname(const char *servname)
+{
+ int n;
+ char *endptr=NULL;
+ n = (int) strtol(servname, &endptr, 10);
+ if (n>=0 && n <= 65535 && servname[0] && endptr && !endptr[0])
+ return n;
+ else
+ return -1;
+}
+
+/** Parse the service name in 'servname', which can be a decimal port number
+ * or (unless EVUTIL_AI_NUMERICSERV is set in hints) a service name looked up
+ * with getservbyname(). Return the port number, or -1 on error.
+ */
+static int
+evutil_parse_servname(const char *servname, const char *protocol,
+ const struct evutil_addrinfo *hints)
+{
+ int n = parse_numeric_servname(servname);
+ if (n>=0)
+ return n;
+#if defined(EVENT__HAVE_GETSERVBYNAME) || defined(_WIN32)
+ if (!(hints->ai_flags & EVUTIL_AI_NUMERICSERV)) {
+ struct servent *ent = getservbyname(servname, protocol);
+ if (ent) {
+ return ntohs(ent->s_port);
+ }
+ }
+#endif
+ return -1;
+}
+
+/* Return a string corresponding to a protocol number that we can pass to
+ * getservbyname. */
+static const char *
+evutil_unparse_protoname(int proto)
+{
+ switch (proto) {
+ case 0:
+ return NULL;
+ case IPPROTO_TCP:
+ return "tcp";
+ case IPPROTO_UDP:
+ return "udp";
+#ifdef IPPROTO_SCTP
+ case IPPROTO_SCTP:
+ return "sctp";
+#endif
+ default:
+#ifdef EVENT__HAVE_GETPROTOBYNUMBER
+ {
+ struct protoent *ent = getprotobynumber(proto);
+ if (ent)
+ return ent->p_name;
+ }
+#endif
+ return NULL;
+ }
+}
+
+static void
+evutil_getaddrinfo_infer_protocols(struct evutil_addrinfo *hints)
+{
+ /* If we can guess the protocol from the socktype, do so. */
+ if (!hints->ai_protocol && hints->ai_socktype) {
+ if (hints->ai_socktype == SOCK_DGRAM)
+ hints->ai_protocol = IPPROTO_UDP;
+ else if (hints->ai_socktype == SOCK_STREAM)
+ hints->ai_protocol = IPPROTO_TCP;
+ }
+
+ /* Set the socktype if it isn't set. */
+ if (!hints->ai_socktype && hints->ai_protocol) {
+ if (hints->ai_protocol == IPPROTO_UDP)
+ hints->ai_socktype = SOCK_DGRAM;
+ else if (hints->ai_protocol == IPPROTO_TCP)
+ hints->ai_socktype = SOCK_STREAM;
+#ifdef IPPROTO_SCTP
+ else if (hints->ai_protocol == IPPROTO_SCTP)
+ hints->ai_socktype = SOCK_STREAM;
+#endif
+ }
+}
+
+#if AF_UNSPEC != PF_UNSPEC
+#error "I cannot build on a system where AF_UNSPEC != PF_UNSPEC"
+#endif
+
+/** Implements the part of looking up hosts by name that's common to both
+ * the blocking and nonblocking resolver:
+ * - Adjust 'hints' to have a reasonable socktype and protocol.
+ * - Look up the port based on 'servname', and store it in *portnum,
+ * - Handle the nodename==NULL case
+ * - Handle some invalid arguments cases.
+ * - Handle the cases where nodename is an IPv4 or IPv6 address.
+ *
+ * If we need the resolver to look up the hostname, we return
+ * EVUTIL_EAI_NEED_RESOLVE. Otherwise, we can completely implement
+ * getaddrinfo: we return 0 or an appropriate EVUTIL_EAI_* error, and
+ * set *res as getaddrinfo would.
+ */
+int
+evutil_getaddrinfo_common_(const char *nodename, const char *servname,
+ struct evutil_addrinfo *hints, struct evutil_addrinfo **res, int *portnum)
+{
+ int port = 0;
+ const char *pname;
+
+ if (nodename == NULL && servname == NULL)
+ return EVUTIL_EAI_NONAME;
+
+ /* We only understand 3 families */
+ if (hints->ai_family != PF_UNSPEC && hints->ai_family != PF_INET &&
+ hints->ai_family != PF_INET6)
+ return EVUTIL_EAI_FAMILY;
+
+ evutil_getaddrinfo_infer_protocols(hints);
+
+ /* Look up the port number and protocol, if possible. */
+ pname = evutil_unparse_protoname(hints->ai_protocol);
+ if (servname) {
+ /* XXXX We could look at the protocol we got back from
+ * getservbyname, but it doesn't seem too useful. */
+ port = evutil_parse_servname(servname, pname, hints);
+ if (port < 0) {
+ return EVUTIL_EAI_NONAME;
+ }
+ }
+
+ /* If we have no node name, then we're supposed to bind to 'any' and
+ * connect to localhost. */
+ if (nodename == NULL) {
+ struct evutil_addrinfo *res4=NULL, *res6=NULL;
+ if (hints->ai_family != PF_INET) { /* INET6 or UNSPEC. */
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ if (hints->ai_flags & EVUTIL_AI_PASSIVE) {
+ /* Bind to :: */
+ } else {
+ /* connect to ::1 */
+ sin6.sin6_addr.s6_addr[15] = 1;
+ }
+ res6 = evutil_new_addrinfo_((struct sockaddr*)&sin6,
+ sizeof(sin6), hints);
+ if (!res6)
+ return EVUTIL_EAI_MEMORY;
+ }
+
+ if (hints->ai_family != PF_INET6) { /* INET or UNSPEC */
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ if (hints->ai_flags & EVUTIL_AI_PASSIVE) {
+ /* Bind to 0.0.0.0 */
+ } else {
+ /* connect to 127.0.0.1 */
+ sin.sin_addr.s_addr = htonl(0x7f000001);
+ }
+ res4 = evutil_new_addrinfo_((struct sockaddr*)&sin,
+ sizeof(sin), hints);
+ if (!res4) {
+ if (res6)
+ evutil_freeaddrinfo(res6);
+ return EVUTIL_EAI_MEMORY;
+ }
+ }
+ *res = evutil_addrinfo_append_(res4, res6);
+ return 0;
+ }
+
+ /* If we can, we should try to parse the hostname without resolving
+ * it. */
+ /* Try ipv6. */
+ if (hints->ai_family == PF_INET6 || hints->ai_family == PF_UNSPEC) {
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+ if (1==evutil_inet_pton(AF_INET6, nodename, &sin6.sin6_addr)) {
+ /* Got an ipv6 address. */
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ *res = evutil_new_addrinfo_((struct sockaddr*)&sin6,
+ sizeof(sin6), hints);
+ if (!*res)
+ return EVUTIL_EAI_MEMORY;
+ return 0;
+ }
+ }
+
+ /* Try ipv4. */
+ if (hints->ai_family == PF_INET || hints->ai_family == PF_UNSPEC) {
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ if (1==evutil_inet_pton(AF_INET, nodename, &sin.sin_addr)) {
+			/* Got an ipv4 address. */
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ *res = evutil_new_addrinfo_((struct sockaddr*)&sin,
+ sizeof(sin), hints);
+ if (!*res)
+ return EVUTIL_EAI_MEMORY;
+ return 0;
+ }
+ }
+
+ /* If we have reached this point, we definitely need to do a DNS
+ * lookup. */
+ if ((hints->ai_flags & EVUTIL_AI_NUMERICHOST)) {
+ /* If we're not allowed to do one, then say so. */
+ return EVUTIL_EAI_NONAME;
+ }
+ *portnum = port;
+ return EVUTIL_EAI_NEED_RESOLVE;
+}
+
+#ifdef EVENT__HAVE_GETADDRINFO
+#define USE_NATIVE_GETADDRINFO
+#endif
+
+#ifdef USE_NATIVE_GETADDRINFO
+/* A mask of all the flags that we declare, so we can clear them before calling
+ * the native getaddrinfo */
+static const unsigned int ALL_NONNATIVE_AI_FLAGS =
+#ifndef AI_PASSIVE
+ EVUTIL_AI_PASSIVE |
+#endif
+#ifndef AI_CANONNAME
+ EVUTIL_AI_CANONNAME |
+#endif
+#ifndef AI_NUMERICHOST
+ EVUTIL_AI_NUMERICHOST |
+#endif
+#ifndef AI_NUMERICSERV
+ EVUTIL_AI_NUMERICSERV |
+#endif
+#ifndef AI_ADDRCONFIG
+ EVUTIL_AI_ADDRCONFIG |
+#endif
+#ifndef AI_ALL
+ EVUTIL_AI_ALL |
+#endif
+#ifndef AI_V4MAPPED
+ EVUTIL_AI_V4MAPPED |
+#endif
+ EVUTIL_AI_LIBEVENT_ALLOCATED;
+
+static const unsigned int ALL_NATIVE_AI_FLAGS =
+#ifdef AI_PASSIVE
+ AI_PASSIVE |
+#endif
+#ifdef AI_CANONNAME
+ AI_CANONNAME |
+#endif
+#ifdef AI_NUMERICHOST
+ AI_NUMERICHOST |
+#endif
+#ifdef AI_NUMERICSERV
+ AI_NUMERICSERV |
+#endif
+#ifdef AI_ADDRCONFIG
+ AI_ADDRCONFIG |
+#endif
+#ifdef AI_ALL
+ AI_ALL |
+#endif
+#ifdef AI_V4MAPPED
+ AI_V4MAPPED |
+#endif
+ 0;
+#endif
+
+#ifndef USE_NATIVE_GETADDRINFO
+/* Helper for systems with no getaddrinfo(): make one or more addrinfos out of
+ * a struct hostent.
+ */
+static struct evutil_addrinfo *
+addrinfo_from_hostent(const struct hostent *ent,
+ int port, const struct evutil_addrinfo *hints)
+{
+ int i;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr *sa;
+ int socklen;
+ struct evutil_addrinfo *res=NULL, *ai;
+ void *addrp;
+
+ if (ent->h_addrtype == PF_INET) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ sa = (struct sockaddr *)&sin;
+ socklen = sizeof(struct sockaddr_in);
+ addrp = &sin.sin_addr;
+ if (ent->h_length != sizeof(sin.sin_addr)) {
+ event_warnx("Weird h_length from gethostbyname");
+ return NULL;
+ }
+ } else if (ent->h_addrtype == PF_INET6) {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ sa = (struct sockaddr *)&sin6;
+ socklen = sizeof(struct sockaddr_in6);
+ addrp = &sin6.sin6_addr;
+ if (ent->h_length != sizeof(sin6.sin6_addr)) {
+ event_warnx("Weird h_length from gethostbyname");
+ return NULL;
+ }
+ } else
+ return NULL;
+
+ for (i = 0; ent->h_addr_list[i]; ++i) {
+ memcpy(addrp, ent->h_addr_list[i], ent->h_length);
+ ai = evutil_new_addrinfo_(sa, socklen, hints);
+ if (!ai) {
+ evutil_freeaddrinfo(res);
+ return NULL;
+ }
+ res = evutil_addrinfo_append_(res, ai);
+ }
+
+ if (res && ((hints->ai_flags & EVUTIL_AI_CANONNAME) && ent->h_name)) {
+ res->ai_canonname = mm_strdup(ent->h_name);
+ if (res->ai_canonname == NULL) {
+ evutil_freeaddrinfo(res);
+ return NULL;
+ }
+ }
+
+ return res;
+}
+#endif
+
+/* If the EVUTIL_AI_ADDRCONFIG flag is set on hints->ai_flags, and
+ * hints->ai_family is PF_UNSPEC, then revise the value of hints->ai_family so
+ * that we'll only get addresses we could maybe connect to.
+ */
+void
+evutil_adjust_hints_for_addrconfig_(struct evutil_addrinfo *hints)
+{
+ if (!(hints->ai_flags & EVUTIL_AI_ADDRCONFIG))
+ return;
+ if (hints->ai_family != PF_UNSPEC)
+ return;
+ if (!have_checked_interfaces)
+ evutil_check_interfaces(0);
+ if (had_ipv4_address && !had_ipv6_address) {
+ hints->ai_family = PF_INET;
+ } else if (!had_ipv4_address && had_ipv6_address) {
+ hints->ai_family = PF_INET6;
+ }
+}
+
+#ifdef USE_NATIVE_GETADDRINFO
+static int need_numeric_port_hack_=0;
+static int need_socktype_protocol_hack_=0;
+static int tested_for_getaddrinfo_hacks=0;
+
+/* Some older BSDs (like OpenBSD up to 4.6) used to believe that
+ giving a numeric port without giving an ai_socktype was verboten.
+ We test for this so we can apply an appropriate workaround. If it
+ turns out that the bug is present, then:
+
+ - If nodename==NULL and servname is numeric, we build an answer
+ ourselves using evutil_getaddrinfo_common_().
+
+ - If nodename!=NULL and servname is numeric, then we set
+ servname=NULL when calling getaddrinfo, and post-process the
+ result to set the ports on it.
+
+ We test for this bug at runtime, since otherwise we can't have the
+ same binary run on multiple BSD versions.
+
+   - Some versions of Solaris believe that it's nice to leave the protocol
+ field set to 0. We test for this so we can apply an appropriate
+ workaround.
+*/
+static void
+test_for_getaddrinfo_hacks(void)
+{
+ int r, r2;
+ struct evutil_addrinfo *ai=NULL, *ai2=NULL;
+ struct evutil_addrinfo hints;
+
+ memset(&hints,0,sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags =
+#ifdef AI_NUMERICHOST
+ AI_NUMERICHOST |
+#endif
+#ifdef AI_NUMERICSERV
+ AI_NUMERICSERV |
+#endif
+ 0;
+ r = getaddrinfo("1.2.3.4", "80", &hints, &ai);
+ hints.ai_socktype = SOCK_STREAM;
+ r2 = getaddrinfo("1.2.3.4", "80", &hints, &ai2);
+ if (r2 == 0 && r != 0) {
+ need_numeric_port_hack_=1;
+ }
+ if (ai2 && ai2->ai_protocol == 0) {
+ need_socktype_protocol_hack_=1;
+ }
+
+ if (ai)
+ freeaddrinfo(ai);
+ if (ai2)
+ freeaddrinfo(ai2);
+ tested_for_getaddrinfo_hacks=1;
+}
+
+static inline int
+need_numeric_port_hack(void)
+{
+ if (!tested_for_getaddrinfo_hacks)
+ test_for_getaddrinfo_hacks();
+ return need_numeric_port_hack_;
+}
+
+static inline int
+need_socktype_protocol_hack(void)
+{
+ if (!tested_for_getaddrinfo_hacks)
+ test_for_getaddrinfo_hacks();
+ return need_socktype_protocol_hack_;
+}
+
+static void
+apply_numeric_port_hack(int port, struct evutil_addrinfo **ai)
+{
+ /* Now we run through the list and set the ports on all of the
+ * results where ports would make sense. */
+ for ( ; *ai; ai = &(*ai)->ai_next) {
+ struct sockaddr *sa = (*ai)->ai_addr;
+ if (sa && sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in*)sa;
+ sin->sin_port = htons(port);
+ } else if (sa && sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)sa;
+ sin6->sin6_port = htons(port);
+ } else {
+ /* A numeric port makes no sense here; remove this one
+ * from the list. */
+ struct evutil_addrinfo *victim = *ai;
+ *ai = victim->ai_next;
+ victim->ai_next = NULL;
+ freeaddrinfo(victim);
+ }
+ }
+}
+
+static int
+apply_socktype_protocol_hack(struct evutil_addrinfo *ai)
+{
+ struct evutil_addrinfo *ai_new;
+ for (; ai; ai = ai->ai_next) {
+ evutil_getaddrinfo_infer_protocols(ai);
+ if (ai->ai_socktype || ai->ai_protocol)
+ continue;
+ ai_new = mm_malloc(sizeof(*ai_new));
+ if (!ai_new)
+ return -1;
+ memcpy(ai_new, ai, sizeof(*ai_new));
+ ai->ai_socktype = SOCK_STREAM;
+ ai->ai_protocol = IPPROTO_TCP;
+ ai_new->ai_socktype = SOCK_DGRAM;
+ ai_new->ai_protocol = IPPROTO_UDP;
+
+ ai_new->ai_next = ai->ai_next;
+ ai->ai_next = ai_new;
+ }
+ return 0;
+}
+#endif
+
+int
+evutil_getaddrinfo(const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res)
+{
+#ifdef USE_NATIVE_GETADDRINFO
+ struct evutil_addrinfo hints;
+ int portnum=-1, need_np_hack, err;
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+#ifndef AI_ADDRCONFIG
+ /* Not every system has AI_ADDRCONFIG, so fake it. */
+ if (hints.ai_family == PF_UNSPEC &&
+ (hints.ai_flags & EVUTIL_AI_ADDRCONFIG)) {
+ evutil_adjust_hints_for_addrconfig_(&hints);
+ }
+#endif
+
+#ifndef AI_NUMERICSERV
+ /* Not every system has AI_NUMERICSERV, so fake it. */
+ if (hints.ai_flags & EVUTIL_AI_NUMERICSERV) {
+ if (servname && parse_numeric_servname(servname)<0)
+ return EVUTIL_EAI_NONAME;
+ }
+#endif
+
+ /* Enough operating systems handle enough common non-resolve
+ * cases here weirdly enough that we are better off just
+ * overriding them. For example:
+ *
+ * - Windows doesn't like to infer the protocol from the
+ * socket type, or fill in socket or protocol types much at
+ * all. It also seems to do its own broken implicit
+ * always-on version of AI_ADDRCONFIG that keeps it from
+ * ever resolving even a literal IPv6 address when
+	 * ai_family is PF_UNSPEC.
+ */
+#ifdef _WIN32
+ {
+ int tmp_port;
+ err = evutil_getaddrinfo_common_(nodename,servname,&hints,
+ res, &tmp_port);
+ if (err == 0 ||
+ err == EVUTIL_EAI_MEMORY ||
+ err == EVUTIL_EAI_NONAME)
+ return err;
+ /* If we make it here, the system getaddrinfo can
+ * have a crack at it. */
+ }
+#endif
+
+	/* See documentation for need_numeric_port_hack above. */
+ need_np_hack = need_numeric_port_hack() && servname && !hints.ai_socktype
+ && ((portnum=parse_numeric_servname(servname)) >= 0);
+ if (need_np_hack) {
+ if (!nodename)
+ return evutil_getaddrinfo_common_(
+ NULL,servname,&hints, res, &portnum);
+ servname = NULL;
+ }
+
+ if (need_socktype_protocol_hack()) {
+ evutil_getaddrinfo_infer_protocols(&hints);
+ }
+
+ /* Make sure that we didn't actually steal any AI_FLAGS values that
+	 * the system is using. (This is a constant expression, and should get
+ * optimized out.)
+ *
+ * XXXX Turn this into a compile-time failure rather than a run-time
+ * failure.
+ */
+ EVUTIL_ASSERT((ALL_NONNATIVE_AI_FLAGS & ALL_NATIVE_AI_FLAGS) == 0);
+
+ /* Clear any flags that only libevent understands. */
+ hints.ai_flags &= ~ALL_NONNATIVE_AI_FLAGS;
+
+ err = getaddrinfo(nodename, servname, &hints, res);
+ if (need_np_hack)
+ apply_numeric_port_hack(portnum, res);
+
+ if (need_socktype_protocol_hack()) {
+ if (apply_socktype_protocol_hack(*res) < 0) {
+ evutil_freeaddrinfo(*res);
+ *res = NULL;
+ return EVUTIL_EAI_MEMORY;
+ }
+ }
+ return err;
+#else
+ int port=0, err;
+ struct hostent *ent = NULL;
+ struct evutil_addrinfo hints;
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+ evutil_adjust_hints_for_addrconfig_(&hints);
+
+ err = evutil_getaddrinfo_common_(nodename, servname, &hints, res, &port);
+ if (err != EVUTIL_EAI_NEED_RESOLVE) {
+ /* We either succeeded or failed. No need to continue */
+ return err;
+ }
+
+ err = 0;
+ /* Use any of the various gethostbyname_r variants as available. */
+ {
+#ifdef EVENT__HAVE_GETHOSTBYNAME_R_6_ARG
+ /* This one is what glibc provides. */
+ char buf[2048];
+ struct hostent hostent;
+ int r;
+ r = gethostbyname_r(nodename, &hostent, buf, sizeof(buf), &ent,
+ &err);
+#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_5_ARG)
+ char buf[2048];
+ struct hostent hostent;
+ ent = gethostbyname_r(nodename, &hostent, buf, sizeof(buf),
+ &err);
+#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_3_ARG)
+ struct hostent_data data;
+ struct hostent hostent;
+ memset(&data, 0, sizeof(data));
+ err = gethostbyname_r(nodename, &hostent, &data);
+ ent = err ? NULL : &hostent;
+#else
+ /* fall back to gethostbyname. */
+ /* XXXX This needs a lock everywhere but Windows. */
+ ent = gethostbyname(nodename);
+#ifdef _WIN32
+ err = WSAGetLastError();
+#else
+ err = h_errno;
+#endif
+#endif
+
+ /* Now we have either ent or err set. */
+ if (!ent) {
+ /* XXX is this right for windows ? */
+ switch (err) {
+ case TRY_AGAIN:
+ return EVUTIL_EAI_AGAIN;
+ case NO_RECOVERY:
+ default:
+ return EVUTIL_EAI_FAIL;
+ case HOST_NOT_FOUND:
+ return EVUTIL_EAI_NONAME;
+ case NO_ADDRESS:
+#if NO_DATA != NO_ADDRESS
+ case NO_DATA:
+#endif
+ return EVUTIL_EAI_NODATA;
+ }
+ }
+
+ if (ent->h_addrtype != hints.ai_family &&
+ hints.ai_family != PF_UNSPEC) {
+ /* This wasn't the type we were hoping for. Too bad
+ * we never had a chance to ask gethostbyname for what
+ * we wanted. */
+ return EVUTIL_EAI_NONAME;
+ }
+
+ /* Make sure we got _some_ answers. */
+ if (ent->h_length == 0)
+ return EVUTIL_EAI_NODATA;
+
+ /* If we got an address type we don't know how to make a
+ sockaddr for, give up. */
+ if (ent->h_addrtype != PF_INET && ent->h_addrtype != PF_INET6)
+ return EVUTIL_EAI_FAMILY;
+
+ *res = addrinfo_from_hostent(ent, port, &hints);
+ if (! *res)
+ return EVUTIL_EAI_MEMORY;
+ }
+
+ return 0;
+#endif
+}
+
+void
+evutil_freeaddrinfo(struct evutil_addrinfo *ai)
+{
+#ifdef EVENT__HAVE_GETADDRINFO
+ if (!(ai->ai_flags & EVUTIL_AI_LIBEVENT_ALLOCATED)) {
+ freeaddrinfo(ai);
+ return;
+ }
+#endif
+ while (ai) {
+ struct evutil_addrinfo *next = ai->ai_next;
+ if (ai->ai_canonname)
+ mm_free(ai->ai_canonname);
+ mm_free(ai);
+ ai = next;
+ }
+}
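+
+/* Usage sketch (illustrative, not part of libevent): resolve with
+ * evutil_getaddrinfo() and always release the result with
+ * evutil_freeaddrinfo(), since the list may have been allocated by libevent
+ * rather than by the system resolver.
+ *
+ *   struct evutil_addrinfo hints, *res = NULL, *ai;
+ *   memset(&hints, 0, sizeof(hints));
+ *   hints.ai_family = AF_UNSPEC;
+ *   hints.ai_socktype = SOCK_STREAM;
+ *   if (evutil_getaddrinfo("example.com", "80", &hints, &res) == 0) {
+ *       for (ai = res; ai; ai = ai->ai_next) {
+ *           ... try connecting to ai->ai_addr ...
+ *       }
+ *       evutil_freeaddrinfo(res);
+ *   }
+ */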
+
+static evdns_getaddrinfo_fn evdns_getaddrinfo_impl = NULL;
+
+void
+evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo_fn fn)
+{
+ if (!evdns_getaddrinfo_impl)
+ evdns_getaddrinfo_impl = fn;
+}
+
+/* Internal helper function: act like evdns_getaddrinfo if dns_base is set;
+ * otherwise do a blocking resolve and pass the result to the callback in the
+ * way that evdns_getaddrinfo would.
+ */
+int
+evutil_getaddrinfo_async_(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg)
+{
+ if (dns_base && evdns_getaddrinfo_impl) {
+ evdns_getaddrinfo_impl(
+ dns_base, nodename, servname, hints_in, cb, arg);
+ } else {
+ struct evutil_addrinfo *ai=NULL;
+ int err;
+ err = evutil_getaddrinfo(nodename, servname, hints_in, &ai);
+ cb(err, ai, arg);
+ }
+ return 0;
+}
+
+const char *
+evutil_gai_strerror(int err)
+{
+ /* As a sneaky side-benefit, this case statement will get most
+ * compilers to tell us if any of the error codes we defined
+ * conflict with the platform's native error codes. */
+ switch (err) {
+ case EVUTIL_EAI_CANCEL:
+ return "Request canceled";
+ case 0:
+ return "No error";
+
+ case EVUTIL_EAI_ADDRFAMILY:
+ return "address family for nodename not supported";
+ case EVUTIL_EAI_AGAIN:
+ return "temporary failure in name resolution";
+ case EVUTIL_EAI_BADFLAGS:
+ return "invalid value for ai_flags";
+ case EVUTIL_EAI_FAIL:
+ return "non-recoverable failure in name resolution";
+ case EVUTIL_EAI_FAMILY:
+ return "ai_family not supported";
+ case EVUTIL_EAI_MEMORY:
+ return "memory allocation failure";
+ case EVUTIL_EAI_NODATA:
+ return "no address associated with nodename";
+ case EVUTIL_EAI_NONAME:
+ return "nodename nor servname provided, or not known";
+ case EVUTIL_EAI_SERVICE:
+ return "servname not supported for ai_socktype";
+ case EVUTIL_EAI_SOCKTYPE:
+ return "ai_socktype not supported";
+ case EVUTIL_EAI_SYSTEM:
+ return "system error";
+ default:
+#if defined(USE_NATIVE_GETADDRINFO) && defined(_WIN32)
+ return gai_strerrorA(err);
+#elif defined(USE_NATIVE_GETADDRINFO)
+ return gai_strerror(err);
+#else
+ return "Unknown error code";
+#endif
+ }
+}
+
+#ifdef _WIN32
+/* destructively remove a trailing line terminator from s */
+static void
+chomp (char *s)
+{
+ size_t len;
+ if (s && (len = strlen (s)) > 0 && s[len - 1] == '\n') {
+ s[--len] = 0;
+ if (len > 0 && s[len - 1] == '\r')
+ s[--len] = 0;
+ }
+}
+
+/* FormatMessage returns allocated strings, but evutil_socket_error_to_string
+ * is supposed to return a string which is good indefinitely without having
+ * to be freed. To make this work without leaking memory, we cache the
+ * string the first time FormatMessage is called on a particular error
+ * code, and then return the cached string on subsequent calls with the
+ * same code. The strings aren't freed until libevent_global_shutdown
+ * (or never). We cache the errors in a hash table; we only expect
+ * there to be a few dozen distinct codes, so lookups stay fast.
+ */
+
+struct cached_sock_errs_entry {
+ HT_ENTRY(cached_sock_errs_entry) node;
+ DWORD code;
+ char *msg; /* allocated with LocalAlloc; free with LocalFree */
+};
+
+static inline unsigned
+hash_cached_sock_errs(const struct cached_sock_errs_entry *e)
+{
+ /* Use Murmur3's 32-bit finalizer as an integer hash function */
+ DWORD h = e->code;
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+ return h;
+}
+
+static inline int
+eq_cached_sock_errs(const struct cached_sock_errs_entry *a,
+ const struct cached_sock_errs_entry *b)
+{
+ return a->code == b->code;
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *windows_socket_errors_lock_ = NULL;
+#endif
+
+static HT_HEAD(cached_sock_errs_map, cached_sock_errs_entry)
+ windows_socket_errors = HT_INITIALIZER();
+
+HT_PROTOTYPE(cached_sock_errs_map,
+ cached_sock_errs_entry,
+ node,
+ hash_cached_sock_errs,
+ eq_cached_sock_errs);
+
+HT_GENERATE(cached_sock_errs_map,
+ cached_sock_errs_entry,
+ node,
+ hash_cached_sock_errs,
+ eq_cached_sock_errs,
+ 0.5,
+ mm_malloc,
+ mm_realloc,
+ mm_free);
+
+/** Equivalent to strerror, but for windows socket errors. */
+const char *
+evutil_socket_error_to_string(int errcode)
+{
+ struct cached_sock_errs_entry *errs, *newerr, find;
+ char *msg = NULL;
+
+ EVLOCK_LOCK(windows_socket_errors_lock_, 0);
+
+ find.code = errcode;
+ errs = HT_FIND(cached_sock_errs_map, &windows_socket_errors, &find);
+ if (errs) {
+ msg = errs->msg;
+ goto done;
+ }
+
+ if (0 != FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS |
+ FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ NULL, errcode, 0, (char *)&msg, 0, NULL))
+ chomp (msg); /* because message has trailing newline */
+ else {
+ size_t len = 50;
+ /* use LocalAlloc because FormatMessage does */
+ msg = LocalAlloc(LMEM_FIXED, len);
+ if (!msg) {
+ msg = (char *)"LocalAlloc failed during Winsock error";
+ goto done;
+ }
+ evutil_snprintf(msg, len, "winsock error 0x%08x", errcode);
+ }
+
+ newerr = (struct cached_sock_errs_entry *)
+ mm_malloc(sizeof (struct cached_sock_errs_entry));
+
+ if (!newerr) {
+ LocalFree(msg);
+ msg = (char *)"malloc failed during Winsock error";
+ goto done;
+ }
+
+ newerr->code = errcode;
+ newerr->msg = msg;
+ HT_INSERT(cached_sock_errs_map, &windows_socket_errors, newerr);
+
+ done:
+ EVLOCK_UNLOCK(windows_socket_errors_lock_, 0);
+
+ return msg;
+}
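+
+/* Illustrative use (not part of libevent): the returned string is cached, so
+ * callers may keep the pointer without copying or freeing it.
+ *
+ *   int err = EVUTIL_SOCKET_ERROR();
+ *   event_warnx("winsock error: %s", evutil_socket_error_to_string(err));
+ */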
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(windows_socket_errors_lock_, 0);
+ return 0;
+}
+#endif
+
+static void
+evutil_free_sock_err_globals(void)
+{
+ struct cached_sock_errs_entry **errs, *tofree;
+
+ for (errs = HT_START(cached_sock_errs_map, &windows_socket_errors)
+ ; errs; ) {
+ tofree = *errs;
+ errs = HT_NEXT_RMV(cached_sock_errs_map,
+ &windows_socket_errors,
+ errs);
+ LocalFree(tofree->msg);
+ mm_free(tofree);
+ }
+
+ HT_CLEAR(cached_sock_errs_map, &windows_socket_errors);
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (windows_socket_errors_lock_ != NULL) {
+ EVTHREAD_FREE_LOCK(windows_socket_errors_lock_, 0);
+ windows_socket_errors_lock_ = NULL;
+ }
+#endif
+}
+
+#else
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_global_setup_locks_(const int enable_locks)
+{
+ return 0;
+}
+#endif
+
+static void
+evutil_free_sock_err_globals(void)
+{
+}
+
+#endif
+
+int
+evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+{
+ int r;
+ va_list ap;
+ va_start(ap, format);
+ r = evutil_vsnprintf(buf, buflen, format, ap);
+ va_end(ap);
+ return r;
+}
+
+int
+evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
+{
+ int r;
+ if (!buflen)
+ return 0;
+#if defined(_MSC_VER) || defined(_WIN32)
+ r = _vsnprintf(buf, buflen, format, ap);
+ if (r < 0)
+ r = _vscprintf(format, ap);
+#elif defined(sgi)
+ /* Make sure we always use the correct vsnprintf on IRIX */
+ extern int _xpg5_vsnprintf(char * __restrict,
+ __SGI_LIBC_NAMESPACE_QUALIFIER size_t,
+ const char * __restrict, /* va_list */ char *);
+
+ r = _xpg5_vsnprintf(buf, buflen, format, ap);
+#else
+ r = vsnprintf(buf, buflen, format, ap);
+#endif
+ buf[buflen-1] = '\0';
+ return r;
+}
+
+#define USE_INTERNAL_NTOP
+#define USE_INTERNAL_PTON
+
+const char *
+evutil_inet_ntop(int af, const void *src, char *dst, size_t len)
+{
+#if defined(EVENT__HAVE_INET_NTOP) && !defined(USE_INTERNAL_NTOP)
+ return inet_ntop(af, src, dst, len);
+#else
+ if (af == AF_INET) {
+ const struct in_addr *in = src;
+ const ev_uint32_t a = ntohl(in->s_addr);
+ int r;
+ r = evutil_snprintf(dst, len, "%d.%d.%d.%d",
+ (int)(ev_uint8_t)((a>>24)&0xff),
+ (int)(ev_uint8_t)((a>>16)&0xff),
+ (int)(ev_uint8_t)((a>>8 )&0xff),
+ (int)(ev_uint8_t)((a )&0xff));
+ if (r<0||(size_t)r>=len)
+ return NULL;
+ else
+ return dst;
+#ifdef AF_INET6
+ } else if (af == AF_INET6) {
+ const struct in6_addr *addr = src;
+ char buf[64], *cp;
+ int longestGapLen = 0, longestGapPos = -1, i,
+ curGapPos = -1, curGapLen = 0;
+ ev_uint16_t words[8];
+ for (i = 0; i < 8; ++i) {
+ words[i] =
+ (((ev_uint16_t)addr->s6_addr[2*i])<<8) + addr->s6_addr[2*i+1];
+ }
+ if (words[0] == 0 && words[1] == 0 && words[2] == 0 && words[3] == 0 &&
+ words[4] == 0 && ((words[5] == 0 && words[6] && words[7]) ||
+ (words[5] == 0xffff))) {
+ /* This is an IPv4 address. */
+ if (words[5] == 0) {
+ evutil_snprintf(buf, sizeof(buf), "::%d.%d.%d.%d",
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
+ } else {
+ evutil_snprintf(buf, sizeof(buf), "::%x:%d.%d.%d.%d", words[5],
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
+ }
+ if (strlen(buf) > len)
+ return NULL;
+ strlcpy(dst, buf, len);
+ return dst;
+ }
+ i = 0;
+ while (i < 8) {
+ if (words[i] == 0) {
+ curGapPos = i++;
+ curGapLen = 1;
+ while (i<8 && words[i] == 0) {
+ ++i; ++curGapLen;
+ }
+ if (curGapLen > longestGapLen) {
+ longestGapPos = curGapPos;
+ longestGapLen = curGapLen;
+ }
+ } else {
+ ++i;
+ }
+ }
+ if (longestGapLen<=1)
+ longestGapPos = -1;
+
+ cp = buf;
+ for (i = 0; i < 8; ++i) {
+ if (words[i] == 0 && longestGapPos == i) {
+ if (i == 0)
+ *cp++ = ':';
+ *cp++ = ':';
+ while (i < 8 && words[i] == 0)
+ ++i;
+ --i; /* to compensate for loop increment. */
+ } else {
+ evutil_snprintf(cp,
+ sizeof(buf)-(cp-buf), "%x", (unsigned)words[i]);
+ cp += strlen(cp);
+ if (i != 7)
+ *cp++ = ':';
+ }
+ }
+ *cp = '\0';
+ if (strlen(buf) > len)
+ return NULL;
+ strlcpy(dst, buf, len);
+ return dst;
+#endif
+ } else {
+ return NULL;
+ }
+#endif
+}
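+
+/* Usage sketch (illustrative): format an IPv6 address into a caller-owned
+ * buffer; 46 bytes (INET6_ADDRSTRLEN on most platforms) is enough for any
+ * string this function produces.
+ *
+ *     char buf[46];
+ *     if (evutil_inet_ntop(AF_INET6, &sin6.sin6_addr, buf, sizeof(buf)))
+ *         printf("peer is %s\n", buf);
+ */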
+
+int
+evutil_inet_pton(int af, const char *src, void *dst)
+{
+#if defined(EVENT__HAVE_INET_PTON) && !defined(USE_INTERNAL_PTON)
+ return inet_pton(af, src, dst);
+#else
+ if (af == AF_INET) {
+ unsigned a,b,c,d;
+ char more;
+ struct in_addr *addr = dst;
+ if (sscanf(src, "%u.%u.%u.%u%c", &a,&b,&c,&d,&more) != 4)
+ return 0;
+ if (a > 255) return 0;
+ if (b > 255) return 0;
+ if (c > 255) return 0;
+ if (d > 255) return 0;
+ addr->s_addr = htonl((a<<24) | (b<<16) | (c<<8) | d);
+ return 1;
+#ifdef AF_INET6
+ } else if (af == AF_INET6) {
+ struct in6_addr *out = dst;
+ ev_uint16_t words[8];
+ int gapPos = -1, i, setWords=0;
+ const char *dot = strchr(src, '.');
+ const char *eow; /* end of words. */
+ if (dot == src)
+ return 0;
+ else if (!dot)
+ eow = src+strlen(src);
+ else {
+ unsigned byte1,byte2,byte3,byte4;
+ char more;
+ for (eow = dot-1; eow >= src && EVUTIL_ISDIGIT_(*eow); --eow)
+ ;
+ ++eow;
+
+ /* We use "scanf" because some platform inet_aton()s are too lax
+ * about IPv4 addresses of the form "1.2.3" */
+ if (sscanf(eow, "%u.%u.%u.%u%c",
+ &byte1,&byte2,&byte3,&byte4,&more) != 4)
+ return 0;
+
+ if (byte1 > 255 ||
+ byte2 > 255 ||
+ byte3 > 255 ||
+ byte4 > 255)
+ return 0;
+
+ words[6] = (byte1<<8) | byte2;
+ words[7] = (byte3<<8) | byte4;
+ setWords += 2;
+ }
+
+ i = 0;
+ while (src < eow) {
+ if (i > 7)
+ return 0;
+ if (EVUTIL_ISXDIGIT_(*src)) {
+ char *next;
+ long r = strtol(src, &next, 16);
+ if (next > 4+src)
+ return 0;
+ if (next == src)
+ return 0;
+ if (r<0 || r>65536)
+ return 0;
+
+ words[i++] = (ev_uint16_t)r;
+ setWords++;
+ src = next;
+ if (*src != ':' && src != eow)
+ return 0;
+ ++src;
+ } else if (*src == ':' && i > 0 && gapPos==-1) {
+ gapPos = i;
+ ++src;
+ } else if (*src == ':' && i == 0 && src[1] == ':' && gapPos==-1) {
+ gapPos = i;
+ src += 2;
+ } else {
+ return 0;
+ }
+ }
+
+ if (setWords > 8 ||
+ (setWords == 8 && gapPos != -1) ||
+ (setWords < 8 && gapPos == -1))
+ return 0;
+
+ if (gapPos >= 0) {
+ int nToMove = setWords - (dot ? 2 : 0) - gapPos;
+ int gapLen = 8 - setWords;
+ /* assert(nToMove >= 0); */
+ if (nToMove < 0)
+ return -1; /* should be impossible */
+ memmove(&words[gapPos+gapLen], &words[gapPos],
+ sizeof(ev_uint16_t)*nToMove);
+ memset(&words[gapPos], 0, sizeof(ev_uint16_t)*gapLen);
+ }
+ for (i = 0; i < 8; ++i) {
+ out->s6_addr[2*i ] = words[i] >> 8;
+ out->s6_addr[2*i+1] = words[i] & 0xff;
+ }
+
+ return 1;
+#endif
+ } else {
+ return -1;
+ }
+#endif
+}
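+
+/* Usage sketch (illustrative): the return convention matches inet_pton():
+ * 1 on success, 0 if the string does not parse, -1 for an unsupported
+ * address family.
+ *
+ *     struct in_addr ina;
+ *     if (evutil_inet_pton(AF_INET, "10.0.0.1", &ina) == 1)
+ *         ... ina.s_addr now holds the address in network byte order ...
+ */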
+
+int
+evutil_parse_sockaddr_port(const char *ip_as_string, struct sockaddr *out, int *outlen)
+{
+ int port;
+ char buf[128];
+ const char *cp, *addr_part, *port_part;
+ int is_ipv6;
+ /* recognized formats are:
+ * [ipv6]:port
+ * ipv6
+ * [ipv6]
+ * ipv4:port
+ * ipv4
+ */
+
+ cp = strchr(ip_as_string, ':');
+ if (*ip_as_string == '[') {
+ size_t len;
+ if (!(cp = strchr(ip_as_string, ']'))) {
+ return -1;
+ }
+ len = ( cp-(ip_as_string + 1) );
+ if (len > sizeof(buf)-1) {
+ return -1;
+ }
+ memcpy(buf, ip_as_string+1, len);
+ buf[len] = '\0';
+ addr_part = buf;
+ if (cp[1] == ':')
+ port_part = cp+2;
+ else
+ port_part = NULL;
+ is_ipv6 = 1;
+ } else if (cp && strchr(cp+1, ':')) {
+ is_ipv6 = 1;
+ addr_part = ip_as_string;
+ port_part = NULL;
+ } else if (cp) {
+ is_ipv6 = 0;
+ if (cp - ip_as_string > (int)sizeof(buf)-1) {
+ return -1;
+ }
+ memcpy(buf, ip_as_string, cp-ip_as_string);
+ buf[cp-ip_as_string] = '\0';
+ addr_part = buf;
+ port_part = cp+1;
+ } else {
+ addr_part = ip_as_string;
+ port_part = NULL;
+ is_ipv6 = 0;
+ }
+
+ if (port_part == NULL) {
+ port = 0;
+ } else {
+ port = atoi(port_part);
+ if (port <= 0 || port > 65535) {
+ return -1;
+ }
+ }
+
+ if (!addr_part)
+ return -1; /* Should be impossible. */
+#ifdef AF_INET6
+ if (is_ipv6)
+ {
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ if (1 != evutil_inet_pton(AF_INET6, addr_part, &sin6.sin6_addr))
+ return -1;
+ if ((int)sizeof(sin6) > *outlen)
+ return -1;
+ memset(out, 0, *outlen);
+ memcpy(out, &sin6, sizeof(sin6));
+ *outlen = sizeof(sin6);
+ return 0;
+ }
+ else
+#endif
+ {
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+ sin.sin_len = sizeof(sin);
+#endif
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ if (1 != evutil_inet_pton(AF_INET, addr_part, &sin.sin_addr))
+ return -1;
+ if ((int)sizeof(sin) > *outlen)
+ return -1;
+ memset(out, 0, *outlen);
+ memcpy(out, &sin, sizeof(sin));
+ *outlen = sizeof(sin);
+ return 0;
+ }
+}
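+
+/* Usage sketch (illustrative): *outlen must hold the size of the output
+ * buffer on entry and is set to the actual sockaddr length on success.
+ *
+ *     struct sockaddr_storage ss;
+ *     int len = sizeof(ss);
+ *     if (evutil_parse_sockaddr_port("[::1]:8080",
+ *             (struct sockaddr *)&ss, &len) == 0)
+ *         ... &ss now holds a sockaddr_in6 of length len ...
+ */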
+
+const char *
+evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen)
+{
+ char b[128];
+ const char *res=NULL;
+ int port;
+ if (sa->sa_family == AF_INET) {
+ const struct sockaddr_in *sin = (const struct sockaddr_in*)sa;
+ res = evutil_inet_ntop(AF_INET, &sin->sin_addr,b,sizeof(b));
+ port = ntohs(sin->sin_port);
+ if (res) {
+ evutil_snprintf(out, outlen, "%s:%d", b, port);
+ return out;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6*)sa;
+ res = evutil_inet_ntop(AF_INET6, &sin6->sin6_addr,b,sizeof(b));
+ port = ntohs(sin6->sin6_port);
+ if (res) {
+ evutil_snprintf(out, outlen, "[%s]:%d", b, port);
+ return out;
+ }
+ }
+
+ evutil_snprintf(out, outlen, "<addr with socktype %d>",
+ (int)sa->sa_family);
+ return out;
+}
+
+int
+evutil_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2,
+ int include_port)
+{
+ int r;
+ if (0 != (r = (sa1->sa_family - sa2->sa_family)))
+ return r;
+
+ if (sa1->sa_family == AF_INET) {
+ const struct sockaddr_in *sin1, *sin2;
+ sin1 = (const struct sockaddr_in *)sa1;
+ sin2 = (const struct sockaddr_in *)sa2;
+ if (sin1->sin_addr.s_addr < sin2->sin_addr.s_addr)
+ return -1;
+ else if (sin1->sin_addr.s_addr > sin2->sin_addr.s_addr)
+ return 1;
+ else if (include_port &&
+ (r = ((int)sin1->sin_port - (int)sin2->sin_port)))
+ return r;
+ else
+ return 0;
+ }
+#ifdef AF_INET6
+ else if (sa1->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin1, *sin2;
+ sin1 = (const struct sockaddr_in6 *)sa1;
+ sin2 = (const struct sockaddr_in6 *)sa2;
+ if ((r = memcmp(sin1->sin6_addr.s6_addr, sin2->sin6_addr.s6_addr, 16)))
+ return r;
+ else if (include_port &&
+ (r = ((int)sin1->sin6_port - (int)sin2->sin6_port)))
+ return r;
+ else
+ return 0;
+ }
+#endif
+ return 1;
+}
+
+/* Tables to implement ctypes-replacement EVUTIL_IS*() functions. Each table
+ * has 256 bits to look up whether a character is in some set or not. This
+ * fails on non-ASCII platforms, but so does every other place where we
+ * take a char and write it onto the network.
+ **/
+static const ev_uint32_t EVUTIL_ISALPHA_TABLE[8] =
+ { 0, 0, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISALNUM_TABLE[8] =
+ { 0, 0x3ff0000, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISSPACE_TABLE[8] = { 0x3e00, 0x1, 0, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISXDIGIT_TABLE[8] =
+ { 0, 0x3ff0000, 0x7e, 0x7e, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISDIGIT_TABLE[8] = { 0, 0x3ff0000, 0, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISPRINT_TABLE[8] =
+ { 0, 0xffffffff, 0xffffffff, 0x7fffffff, 0, 0, 0, 0x0 };
+static const ev_uint32_t EVUTIL_ISUPPER_TABLE[8] = { 0, 0, 0x7fffffe, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISLOWER_TABLE[8] = { 0, 0, 0, 0x7fffffe, 0, 0, 0, 0 };
+/* Upper-casing and lowercasing tables to map characters to upper/lowercase
+ * equivalents. */
+static const unsigned char EVUTIL_TOUPPER_TABLE[256] = {
+ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+ 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
+ 96,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,90,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
+};
+static const unsigned char EVUTIL_TOLOWER_TABLE[256] = {
+ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+ 64,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,120,121,122,91,92,93,94,95,
+ 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
+};
+
+#define IMPL_CTYPE_FN(name) \
+ int EVUTIL_##name##_(char c) { \
+ ev_uint8_t u = c; \
+ return !!(EVUTIL_##name##_TABLE[(u >> 5) & 7] & (1 << (u & 31))); \
+ }
+IMPL_CTYPE_FN(ISALPHA)
+IMPL_CTYPE_FN(ISALNUM)
+IMPL_CTYPE_FN(ISSPACE)
+IMPL_CTYPE_FN(ISDIGIT)
+IMPL_CTYPE_FN(ISXDIGIT)
+IMPL_CTYPE_FN(ISPRINT)
+IMPL_CTYPE_FN(ISLOWER)
+IMPL_CTYPE_FN(ISUPPER)
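+
+/* Worked example of the table lookup above: 'A' is 0x41, so u >> 5 is 2
+ * and u & 31 is 1; bit 1 of EVUTIL_ISALPHA_TABLE[2] (0x7fffffe) is set,
+ * so EVUTIL_ISALPHA_('A') is true.  For '0' (0x30), u >> 5 is 1 and
+ * u & 31 is 16; bit 16 of the ISALNUM entry 0x3ff0000 is set, but
+ * EVUTIL_ISALPHA_TABLE[1] is 0, so '0' is alphanumeric but not
+ * alphabetic. */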
+
+char EVUTIL_TOLOWER_(char c)
+{
+ return ((char)EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c]);
+}
+char EVUTIL_TOUPPER_(char c)
+{
+ return ((char)EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c]);
+}
+int
+evutil_ascii_strcasecmp(const char *s1, const char *s2)
+{
+ char c1, c2;
+ while (1) {
+ c1 = EVUTIL_TOLOWER_(*s1++);
+ c2 = EVUTIL_TOLOWER_(*s2++);
+ if (c1 < c2)
+ return -1;
+ else if (c1 > c2)
+ return 1;
+ else if (c1 == 0)
+ return 0;
+ }
+}
+int evutil_ascii_strncasecmp(const char *s1, const char *s2, size_t n)
+{
+ char c1, c2;
+ while (n--) {
+ c1 = EVUTIL_TOLOWER_(*s1++);
+ c2 = EVUTIL_TOLOWER_(*s2++);
+ if (c1 < c2)
+ return -1;
+ else if (c1 > c2)
+ return 1;
+ else if (c1 == 0)
+ return 0;
+ }
+ return 0;
+}
+
+void
+evutil_rtrim_lws_(char *str)
+{
+ char *cp;
+
+ if (str == NULL)
+ return;
+
+ if ((cp = strchr(str, '\0')) == NULL || (cp == str))
+ return;
+
+ --cp;
+
+ while (*cp == ' ' || *cp == '\t') {
+ *cp = '\0';
+ if (cp == str)
+ break;
+ --cp;
+ }
+}
+
+static int
+evutil_issetugid(void)
+{
+#ifdef EVENT__HAVE_ISSETUGID
+ return issetugid();
+#else
+
+#ifdef EVENT__HAVE_GETEUID
+ if (getuid() != geteuid())
+ return 1;
+#endif
+#ifdef EVENT__HAVE_GETEGID
+ if (getgid() != getegid())
+ return 1;
+#endif
+ return 0;
+#endif
+}
+
+const char *
+evutil_getenv_(const char *varname)
+{
+ if (evutil_issetugid())
+ return NULL;
+
+ return getenv(varname);
+}
+
+ev_uint32_t
+evutil_weakrand_seed_(struct evutil_weakrand_state *state, ev_uint32_t seed)
+{
+ if (seed == 0) {
+ struct timeval tv;
+ evutil_gettimeofday(&tv, NULL);
+ seed = (ev_uint32_t)tv.tv_sec + (ev_uint32_t)tv.tv_usec;
+#ifdef _WIN32
+ seed += (ev_uint32_t) _getpid();
+#else
+ seed += (ev_uint32_t) getpid();
+#endif
+ }
+ state->seed = seed;
+ return seed;
+}
+
+ev_int32_t
+evutil_weakrand_(struct evutil_weakrand_state *state)
+{
+ /* This RNG implementation is a linear congruential generator, with
+ * modulus 2^31, multiplier 1103515245, and addend 12345. It's also
+ * used by OpenBSD, and by Glibc's TYPE_0 RNG.
+ *
+ * The linear congruential generator is not an industrial-strength
+ * RNG! It's fast, but it can have higher-order patterns. Notably,
+ * the low bits tend to have periodicity.
+ */
+ state->seed = ((state->seed) * 1103515245 + 12345) & 0x7fffffff;
+ return (ev_int32_t)(state->seed);
+}
+
+ev_int32_t
+evutil_weakrand_range_(struct evutil_weakrand_state *state, ev_int32_t top)
+{
+ ev_int32_t divisor, result;
+
+ /* We can't just do weakrand() % top, since the low bits of the LCG
+ * are less random than the high ones. (Specifically, since the LCG
+ * modulus is 2^N, every 2^m for m<N will divide the modulus, and so
+ * therefore the low m bits of the LCG will have period 2^m.) */
+ divisor = EVUTIL_WEAKRAND_MAX / top;
+ do {
+ result = evutil_weakrand_(state) / divisor;
+ } while (result >= top);
+ return result;
+}
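+
+/* Worked example (assuming EVUTIL_WEAKRAND_MAX is 0x7fffffff, the largest
+ * value the LCG above can return): for top == 6, divisor is
+ * 0x7fffffff / 6 == 357913941.  Dividing the raw LCG output by that maps
+ * 0..2147483645 evenly onto 0..5, and the two inputs that would yield 6
+ * are rejected by the loop, so the result depends on the
+ * better-distributed high bits instead of the weak low bits. */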
+
+/**
+ * Volatile pointer to memset: we use this to keep the compiler from
+ * eliminating our call to memset.
+ */
+void * (*volatile evutil_memset_volatile_)(void *, int, size_t) = memset;
+
+void
+evutil_memclear_(void *mem, size_t len)
+{
+ evutil_memset_volatile_(mem, 0, len);
+}
+
+int
+evutil_sockaddr_is_loopback_(const struct sockaddr *addr)
+{
+ static const char LOOPBACK_S6[16] =
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1";
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+ return (ntohl(sin->sin_addr.s_addr) & 0xff000000) == 0x7f000000;
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+ return !memcmp(sin6->sin6_addr.s6_addr, LOOPBACK_S6, 16);
+ }
+ return 0;
+}
+
+int
+evutil_hex_char_to_int_(char c)
+{
+ switch(c)
+ {
+ case '0': return 0;
+ case '1': return 1;
+ case '2': return 2;
+ case '3': return 3;
+ case '4': return 4;
+ case '5': return 5;
+ case '6': return 6;
+ case '7': return 7;
+ case '8': return 8;
+ case '9': return 9;
+ case 'A': case 'a': return 10;
+ case 'B': case 'b': return 11;
+ case 'C': case 'c': return 12;
+ case 'D': case 'd': return 13;
+ case 'E': case 'e': return 14;
+ case 'F': case 'f': return 15;
+ }
+ return -1;
+}
+
+#ifdef _WIN32
+HMODULE
+evutil_load_windows_system_library_(const TCHAR *library_name)
+{
+ TCHAR path[MAX_PATH];
+ unsigned n;
+ n = GetSystemDirectory(path, MAX_PATH);
+ if (n == 0 || n + _tcslen(library_name) + 2 >= MAX_PATH)
+ return 0;
+ _tcscat(path, TEXT("\\"));
+ _tcscat(path, library_name);
+ return LoadLibrary(path);
+}
+#endif
+
+/* Internal wrapper around 'socket' to provide Linux-style support for
+ * syscall-saving methods where available.
+ *
+ * In addition to regular socket behavior, you can use a bitwise or to set the
+ * flags EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'type' argument,
+ * to make the socket nonblocking or close-on-exec with as few syscalls as
+ * possible.
+ */
+evutil_socket_t
+evutil_socket_(int domain, int type, int protocol)
+{
+ evutil_socket_t r;
+#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
+ r = socket(domain, type, protocol);
+ if (r >= 0)
+ return r;
+ else if ((type & (SOCK_NONBLOCK|SOCK_CLOEXEC)) == 0)
+ return -1;
+#endif
+#define SOCKET_TYPE_MASK (~(EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC))
+ r = socket(domain, type & SOCKET_TYPE_MASK, protocol);
+ if (r < 0)
+ return -1;
+ if (type & EVUTIL_SOCK_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ if (type & EVUTIL_SOCK_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ return r;
+}
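+
+/* Usage sketch (illustrative): ask for a nonblocking, close-on-exec TCP
+ * socket; where SOCK_NONBLOCK/SOCK_CLOEXEC exist this is a single
+ * syscall, elsewhere the flags are applied with follow-up calls.
+ *
+ *     evutil_socket_t fd = evutil_socket_(AF_INET,
+ *         SOCK_STREAM | EVUTIL_SOCK_NONBLOCK | EVUTIL_SOCK_CLOEXEC, 0);
+ *     if (fd < 0)
+ *         ... handle the error ...
+ */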
+
+/* Internal wrapper around 'accept' or 'accept4' to provide Linux-style
+ * support for syscall-saving methods where available.
+ *
+ * In addition to regular accept behavior, you can set one or more of flags
+ * EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'flags' argument, to
+ * make the socket nonblocking or close-on-exec with as few syscalls as
+ * possible.
+ */
+evutil_socket_t
+evutil_accept4_(evutil_socket_t sockfd, struct sockaddr *addr,
+ ev_socklen_t *addrlen, int flags)
+{
+ evutil_socket_t result;
+#if defined(EVENT__HAVE_ACCEPT4) && defined(SOCK_CLOEXEC) && defined(SOCK_NONBLOCK)
+ result = accept4(sockfd, addr, addrlen, flags);
+ if (result >= 0 || (errno != EINVAL && errno != ENOSYS)) {
+ /* A nonnegative result means that we succeeded, so return.
+ * Failing with EINVAL means that an option wasn't supported,
+ * and failing with ENOSYS means that the syscall wasn't
+ * there: in those cases we want to fall back. Otherwise, we
+ * got a real error, and we should return. */
+ return result;
+ }
+#endif
+ result = accept(sockfd, addr, addrlen);
+ if (result < 0)
+ return result;
+
+ if (flags & EVUTIL_SOCK_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(result) < 0) {
+ evutil_closesocket(result);
+ return -1;
+ }
+ }
+ if (flags & EVUTIL_SOCK_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(result) < 0) {
+ evutil_closesocket(result);
+ return -1;
+ }
+ }
+ return result;
+}
+
+/* Internal function: Set fd[0] and fd[1] to a pair of fds such that writes on
+ * fd[0] get read from fd[1]. Make both fds nonblocking and close-on-exec.
+ * Return 0 on success, -1 on failure.
+ */
+int
+evutil_make_internal_pipe_(evutil_socket_t fd[2])
+{
+ /*
+ Making the second socket nonblocking is a bit subtle, given that we
+ ignore any EAGAIN returns when writing to it, and you don't usually
+ do that for a nonblocking socket. But if the kernel gives us EAGAIN,
+ then there's no need to add any more data to the buffer, since
+ the main thread is already either about to wake up and drain it,
+ or woken up and in the process of draining it.
+ */
+
+#if defined(EVENT__HAVE_PIPE2)
+ if (pipe2(fd, O_NONBLOCK|O_CLOEXEC) == 0)
+ return 0;
+#endif
+#if defined(EVENT__HAVE_PIPE)
+ if (pipe(fd) == 0) {
+ if (evutil_fast_socket_nonblocking(fd[0]) < 0 ||
+ evutil_fast_socket_nonblocking(fd[1]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[0]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[1]) < 0) {
+ close(fd[0]);
+ close(fd[1]);
+ fd[0] = fd[1] = -1;
+ return -1;
+ }
+ return 0;
+ } else {
+ event_warn("%s: pipe", __func__);
+ }
+#endif
+
+#ifdef _WIN32
+#define LOCAL_SOCKETPAIR_AF AF_INET
+#else
+#define LOCAL_SOCKETPAIR_AF AF_UNIX
+#endif
+ if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, fd) == 0) {
+ if (evutil_fast_socket_nonblocking(fd[0]) < 0 ||
+ evutil_fast_socket_nonblocking(fd[1]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[0]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[1]) < 0) {
+ evutil_closesocket(fd[0]);
+ evutil_closesocket(fd[1]);
+ fd[0] = fd[1] = -1;
+ return -1;
+ }
+ return 0;
+ }
+ fd[0] = fd[1] = -1;
+ return -1;
+}
+
+/* Wrapper around eventfd on systems that provide it. Unlike the system
+ * eventfd, it always supports EVUTIL_EFD_CLOEXEC and EVUTIL_EFD_NONBLOCK as
+ * flags. Returns -1 on error or if eventfd is not supported.
+ */
+evutil_socket_t
+evutil_eventfd_(unsigned initval, int flags)
+{
+#if defined(EVENT__HAVE_EVENTFD) && defined(EVENT__HAVE_SYS_EVENTFD_H)
+ int r;
+#if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ r = eventfd(initval, flags);
+ if (r >= 0 || flags == 0)
+ return r;
+#endif
+ r = eventfd(initval, 0);
+ if (r < 0)
+ return r;
+ if (flags & EVUTIL_EFD_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ if (flags & EVUTIL_EFD_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ return r;
+#else
+ return -1;
+#endif
+}
+
+void
+evutil_free_globals_(void)
+{
+ evutil_free_secure_rng_globals_();
+ evutil_free_sock_err_globals();
+}
diff --git a/libs/libevent/src/evutil_rand.c b/libs/libevent/src/evutil_rand.c
new file mode 100644
index 0000000000..046a14b07a
--- /dev/null
+++ b/libs/libevent/src/evutil_rand.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file has our secure PRNG code. On platforms that have arc4random(),
+ * we just use that. Otherwise, we include arc4random.c as a bunch of static
+ * functions, and wrap it lightly. We don't expose the arc4random*() APIs
+ * because A) they aren't in our namespace, and B) it's not nice to name your
+ * APIs after their implementations. We keep them in a separate file
+ * so that other people can rip it out and use it for whatever.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <limits.h>
+
+#include "util-internal.h"
+#include "evthread-internal.h"
+
+#ifdef EVENT__HAVE_ARC4RANDOM
+#include <stdlib.h>
+#include <string.h>
+int
+evutil_secure_rng_set_urandom_device_file(char *fname)
+{
+ (void) fname;
+ return -1;
+}
+int
+evutil_secure_rng_init(void)
+{
+ /* call arc4random() now to force it to self-initialize */
+ (void) arc4random();
+ return 0;
+}
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+ return 0;
+}
+#endif
+static void
+evutil_free_secure_rng_globals_locks(void)
+{
+}
+
+static void
+ev_arc4random_buf(void *buf, size_t n)
+{
+#if defined(EVENT__HAVE_ARC4RANDOM_BUF) && !defined(__APPLE__)
+ arc4random_buf(buf, n);
+ return;
+#else
+ unsigned char *b = buf;
+
+#if defined(EVENT__HAVE_ARC4RANDOM_BUF)
+ /* OSX 10.7 introduced arc4random_buf, so if you build your program
+ * there, you'll be surprised when older versions of OSX fail to run it.
+ * To solve this, we can check whether the function pointer is set,
+ * and fall back otherwise. (OSX does this using some linker
+ * trickery.)
+ */
+ {
+ void (*tptr)(void *,size_t) =
+ (void (*)(void*,size_t))arc4random_buf;
+ if (tptr != NULL) {
+ arc4random_buf(buf, n);
+ return;
+ }
+ }
+#endif
+ /* Make sure that we start out with b at a 4-byte alignment; plenty
+ * of CPUs care about this for 32-bit access. */
+ if (n >= 4 && ((ev_uintptr_t)b) & 3) {
+ ev_uint32_t u = arc4random();
+ int n_bytes = 4 - (((ev_uintptr_t)b) & 3);
+ memcpy(b, &u, n_bytes);
+ b += n_bytes;
+ n -= n_bytes;
+ }
+ while (n >= 4) {
+ *(ev_uint32_t*)b = arc4random();
+ b += 4;
+ n -= 4;
+ }
+ if (n) {
+ ev_uint32_t u = arc4random();
+ memcpy(b, &u, n);
+ }
+#endif
+}
+
+#else /* !EVENT__HAVE_ARC4RANDOM { */
+
+#ifdef EVENT__ssize_t
+#define ssize_t EVENT__ssize_t
+#endif
+#define ARC4RANDOM_EXPORT static
+#define ARC4_LOCK_() EVLOCK_LOCK(arc4rand_lock, 0)
+#define ARC4_UNLOCK_() EVLOCK_UNLOCK(arc4rand_lock, 0)
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *arc4rand_lock;
+#endif
+
+#define ARC4RANDOM_UINT32 ev_uint32_t
+#define ARC4RANDOM_NOSTIR
+#define ARC4RANDOM_NORANDOM
+#define ARC4RANDOM_NOUNIFORM
+
+#include "./arc4random.c"
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0);
+ return 0;
+}
+#endif
+
+static void
+evutil_free_secure_rng_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (arc4rand_lock != NULL) {
+ EVTHREAD_FREE_LOCK(arc4rand_lock, 0);
+ arc4rand_lock = NULL;
+ }
+#endif
+ return;
+}
+
+int
+evutil_secure_rng_set_urandom_device_file(char *fname)
+{
+#ifdef TRY_SEED_URANDOM
+ ARC4_LOCK_();
+ arc4random_urandom_filename = fname;
+ ARC4_UNLOCK_();
+#endif
+ return 0;
+}
+
+int
+evutil_secure_rng_init(void)
+{
+ int val;
+
+ ARC4_LOCK_();
+ if (!arc4_seeded_ok)
+ arc4_stir();
+ val = arc4_seeded_ok ? 0 : -1;
+ ARC4_UNLOCK_();
+ return val;
+}
+
+static void
+ev_arc4random_buf(void *buf, size_t n)
+{
+ arc4random_buf(buf, n);
+}
+
+#endif /* } !EVENT__HAVE_ARC4RANDOM */
+
+void
+evutil_secure_rng_get_bytes(void *buf, size_t n)
+{
+ ev_arc4random_buf(buf, n);
+}
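+
+/* Usage sketch (illustrative): fill any fixed-size object with random
+ * bytes, e.g. to pick an unpredictable DNS transaction ID.
+ *
+ *     ev_uint16_t txid;
+ *     evutil_secure_rng_get_bytes(&txid, sizeof(txid));
+ */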
+
+void
+evutil_secure_rng_add_bytes(const char *buf, size_t n)
+{
+ arc4random_addrandom((unsigned char*)buf,
+ n>(size_t)INT_MAX ? INT_MAX : (int)n);
+}
+
+void
+evutil_free_secure_rng_globals_(void)
+{
+ evutil_free_secure_rng_globals_locks();
+}
diff --git a/libs/libevent/src/evutil_time.c b/libs/libevent/src/evutil_time.c
new file mode 100644
index 0000000000..8f53c66b68
--- /dev/null
+++ b/libs/libevent/src/evutil_time.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#include <limits.h>
+#ifndef EVENT__HAVE_GETTIMEOFDAY
+#include <sys/timeb.h>
+#endif
+#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
+ !defined(_WIN32)
+#include <sys/select.h>
+#endif
+#include <time.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+
+#ifndef EVENT__HAVE_GETTIMEOFDAY
+/* No gettimeofday; this must be windows. */
+int
+evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+#ifdef _MSC_VER
+#define U64_LITERAL(n) n##ui64
+#else
+#define U64_LITERAL(n) n##llu
+#endif
+
+ /* Conversion logic taken from Tor, which in turn took it
+ * from Perl. GetSystemTimeAsFileTime returns its value as
+ * an unaligned (!) 64-bit value containing the number of
+ * 100-nanosecond intervals since 1 January 1601 UTC. */
+#define EPOCH_BIAS U64_LITERAL(116444736000000000)
+#define UNITS_PER_SEC U64_LITERAL(10000000)
+#define USEC_PER_SEC U64_LITERAL(1000000)
+#define UNITS_PER_USEC U64_LITERAL(10)
+ union {
+ FILETIME ft_ft;
+ ev_uint64_t ft_64;
+ } ft;
+
+ if (tv == NULL)
+ return -1;
+
+ GetSystemTimeAsFileTime(&ft.ft_ft);
+
+ if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
+ /* Time before the unix epoch. */
+ return -1;
+ }
+ ft.ft_64 -= EPOCH_BIAS;
+ tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
+ tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
+ return 0;
+}
+#endif
+
+#define MAX_SECONDS_IN_MSEC_LONG \
+ (((LONG_MAX) - 999) / 1000)
+
+long
+evutil_tv_to_msec_(const struct timeval *tv)
+{
+ if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
+ return -1;
+
+ return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
+}
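+
+/* Worked example: tv = { 1, 1500 } gives 1*1000 + (1500 + 999)/1000 ==
+ * 1002 msec; fractional milliseconds are rounded up so a timeout is never
+ * shortened by the conversion. */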
+
+/*
+ Replacement for usleep on platforms that don't have one. Not guaranteed to
+ be any more fine-grained than 1 msec.
+ */
+void
+evutil_usleep_(const struct timeval *tv)
+{
+ if (!tv)
+ return;
+#if defined(_WIN32)
+ {
+ long msec = evutil_tv_to_msec_(tv);
+ Sleep((DWORD)msec);
+ }
+#elif defined(EVENT__HAVE_NANOSLEEP)
+ {
+ struct timespec ts;
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ nanosleep(&ts, NULL);
+ }
+#elif defined(EVENT__HAVE_USLEEP)
+ /* Some systems don't like to usleep more than 999999 usec */
+ sleep(tv->tv_sec);
+ usleep(tv->tv_usec);
+#else
+ select(0, NULL, NULL, NULL, tv);
+#endif
+}
+
+/*
+ This function assumes it's called repeatedly with a
+ not-actually-so-monotonic time source whose outputs are in 'tv'. It
+ implements a trivial ratcheting mechanism so that the values never go
+ backwards.
+ */
+static void
+adjust_monotonic_time(struct evutil_monotonic_timer *base,
+ struct timeval *tv)
+{
+ evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);
+
+ if (evutil_timercmp(tv, &base->last_time, <)) {
+ /* Guess it wasn't monotonic after all. */
+ struct timeval adjust;
+ evutil_timersub(&base->last_time, tv, &adjust);
+ evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
+ &base->adjust_monotonic_clock);
+ *tv = base->last_time;
+ }
+ base->last_time = *tv;
+}
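+
+/* Worked example: if the raw source reports 100.0s and then 99.2s, the
+ * second call adds the 0.8s deficit to adjust_monotonic_clock and reports
+ * 100.0s again; all later readings are shifted forward by 0.8s, so the
+ * reported time never runs backwards. */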
+
+/*
+ Allocate a new struct evutil_monotonic_timer
+ */
+struct evutil_monotonic_timer *
+evutil_monotonic_timer_new(void)
+{
+ struct evutil_monotonic_timer *p = NULL;
+
+ p = mm_malloc(sizeof(*p));
+ if (!p) goto done;
+
+ memset(p, 0, sizeof(*p));
+
+ done:
+ return p;
+}
+
+/*
+ Free a struct evutil_monotonic_timer
+ */
+void
+evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
+{
+ if (timer) {
+ mm_free(timer);
+ }
+}
+
+/*
+ Set up a struct evutil_monotonic_timer for initial use
+ */
+int
+evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
+ int flags)
+{
+ return evutil_configure_monotonic_time_(timer, flags);
+}
+
+/*
+ Query the current monotonic time
+ */
+int
+evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
+ struct timeval *tp)
+{
+ return evutil_gettime_monotonic_(timer, tp);
+}
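+
+/* Usage sketch of this public API (illustrative):
+ *
+ *     struct evutil_monotonic_timer *t = evutil_monotonic_timer_new();
+ *     struct timeval now;
+ *     if (t && evutil_configure_monotonic_time(t, 0) == 0 &&
+ *         evutil_gettime_monotonic(t, &now) == 0)
+ *         ... 'now' only moves forward across calls on the same timer ...
+ *     evutil_monotonic_timer_free(t);
+ */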
+
+
+#if defined(HAVE_POSIX_MONOTONIC)
+/* =====
+ The POSIX clock_gettime() interface provides a few ways to get at a
+ monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also
+ provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.
+
+ On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
+ Platforms don't agree about whether it should jump on a sleep/resume.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ /* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to
+ * check for it at runtime, because some older kernel versions won't
+ * have it working. */
+#ifdef CLOCK_MONOTONIC_COARSE
+ const int precise = flags & EV_MONOT_PRECISE;
+#endif
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ struct timespec ts;
+
+#ifdef CLOCK_MONOTONIC_COARSE
+ if (CLOCK_MONOTONIC_COARSE < 0) {
+ /* Technically speaking, nothing keeps CLOCK_* from being
+ * negative (as far as I know). This check and the one below
+ * make sure that it's safe for us to use -1 as an "unset"
+ * value. */
+ event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
+ }
+ if (! precise && ! fallback) {
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
+ base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
+ return 0;
+ }
+ }
+#endif
+ if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ base->monotonic_clock = CLOCK_MONOTONIC;
+ return 0;
+ }
+
+ if (CLOCK_MONOTONIC < 0) {
+ event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
+ }
+
+ base->monotonic_clock = -1;
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ struct timespec ts;
+
+ if (base->monotonic_clock < 0) {
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+ }
+
+ if (clock_gettime(base->monotonic_clock, &ts) == -1)
+ return -1;
+ tp->tv_sec = ts.tv_sec;
+ tp->tv_usec = ts.tv_nsec / 1000;
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_MACH_MONOTONIC)
+/* ======
+ Apple is a little late to the POSIX party. And why not? Instead of
+ clock_gettime(), they provide mach_absolute_time(). Its units are not
+ fixed; we need to use mach_timebase_info() to get the right functions to
+ convert its units into nanoseconds.
+
+ To all appearances, mach_absolute_time() seems to be honest-to-goodness
+ monotonic. Whether it stops during sleep or not is unspecified in
+ principle, and dependent on CPU architecture in practice.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ struct mach_timebase_info mi;
+ memset(base, 0, sizeof(*base));
+ /* OSX has mach_absolute_time() */
+ if (!fallback &&
+ mach_timebase_info(&mi) == 0 &&
+ mach_absolute_time() != 0) {
+ /* mach_timebase_info tells us how to convert
+ * mach_absolute_time() into nanoseconds, but we
+ * want to use microseconds instead. */
+ mi.denom *= 1000;
+ memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
+ } else {
+ base->mach_timebase_units.numer = 0;
+ }
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ ev_uint64_t abstime, usec;
+ if (base->mach_timebase_units.numer == 0) {
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+ }
+
+ abstime = mach_absolute_time();
+ usec = (abstime * base->mach_timebase_units.numer)
+ / (base->mach_timebase_units.denom);
+ tp->tv_sec = usec / 1000000;
+ tp->tv_usec = usec % 1000000;
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_WIN32_MONOTONIC)
+/* =====
+ Turn we now to Windows. Want monotonic time on Windows?
+
+ Windows has QueryPerformanceCounter(), which gives the most high-
+ resolution time. It's a pity it's not so monotonic in practice; it's
+ also got some fun bugs, especially: with older Windowses, under
+ virtualizations, with funny hardware, on multiprocessor systems, and so
+ on. PEP418 [1] has a nice roundup of the issues here.
+
+ There's GetTickCount64() on Vista and later, which gives a number of 1-msec
+ ticks since startup. The accuracy here might be as bad as 10-20 msec, I
+ hear. There's an undocumented function (NtSetTimerResolution) that
+ allegedly increases the accuracy. Good luck!
+
+ There's also GetTickCount(), which is only 32 bits, but seems to be
+ supported on pre-Vista versions of Windows. Apparently, you can coax
+ another 14 bits out of it, giving you 2231 years before rollover.
+
+ The less said about timeGetTime() the better.
+
+ "We don't care. We don't have to. We're the Phone Company."
+ -- Lily Tomlin, SNL
+
+ Our strategy, if precise timers are turned off, is to just use the best
+ GetTickCount equivalent available. If we've been asked for precise timing,
+ then we mostly[2] assume that GetTickCount is monotonic, and correct
+ QueryPerformanceCounter to approximate it.
+
+ [1] http://www.python.org/dev/peps/pep-0418
+ [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
+ anyway, just in case it isn't.
+
+ */
+/*
+ Parts of our logic in the win32 timer code here are closely based on
+ BitTorrent's libUTP library. That code is subject to the following
+ license:
+
+ Copyright (c) 2010 BitTorrent, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+static ev_uint64_t
+evutil_GetTickCount_(struct evutil_monotonic_timer *base)
+{
+ if (base->GetTickCount64_fn) {
+ /* Let's just use GetTickCount64 if we can. */
+ return base->GetTickCount64_fn();
+ } else if (base->GetTickCount_fn) {
+ /* Greg Hazel assures me that this works, that BitTorrent has
+ * done it for years, and that it won't turn around and
+ * bite us. He says they found it on some game programmers'
+ * forum some time around 2007.
+ */
+ ev_uint64_t v = base->GetTickCount_fn();
+ return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
+ } else {
+ /* Here's the fallback implementation. We have to use
+ * GetTickCount() with its given signature, so we only get
+ * 32 bits worth of milliseconds, which will roll over every
+ * 49 days or so. */
+ DWORD ticks = GetTickCount();
+ if (ticks < base->last_tick_count) {
+ base->adjust_tick_count += ((ev_uint64_t)1) << 32;
+ }
+ base->last_tick_count = ticks;
+ return ticks + base->adjust_tick_count;
+ }
+}
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ const int precise = flags & EV_MONOT_PRECISE;
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ HANDLE h;
+ memset(base, 0, sizeof(*base));
+
+ h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
+ if (h != NULL && !fallback) {
+ base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
+ base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
+ }
+
+ base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
+ if (precise && !fallback) {
+ LARGE_INTEGER freq;
+ if (QueryPerformanceFrequency(&freq)) {
+ LARGE_INTEGER counter;
+ QueryPerformanceCounter(&counter);
+ base->first_counter = counter.QuadPart;
+ base->usec_per_count = 1.0e6 / freq.QuadPart;
+ base->use_performance_counter = 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline ev_int64_t
+abs64(ev_int64_t i)
+{
+ return i < 0 ? -i : i;
+}
+
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ ev_uint64_t ticks = evutil_GetTickCount_(base);
+ if (base->use_performance_counter) {
+ /* Here's a trick we took from BitTorrent's libutp, at Greg
+ * Hazel's recommendation. We use QueryPerformanceCounter for
+ * our high-resolution timer, but use GetTickCount*() to keep
+ * it sane, and adjust_monotonic_time() to keep it monotonic.
+ */
+ LARGE_INTEGER counter;
+ ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
+ QueryPerformanceCounter(&counter);
+ counter_elapsed = (ev_int64_t)
+ (counter.QuadPart - base->first_counter);
+ ticks_elapsed = ticks - base->first_tick;
+ /* TODO: This may upset VC6. If you need this to work with
+ * VC6, please supply an appropriate patch. */
+ counter_usec_elapsed = (ev_int64_t)
+ (counter_elapsed * base->usec_per_count);
+
+ if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
+ /* It appears that the QueryPerformanceCounter()
+ * result is more than 1 second away from
+ * GetTickCount() result. Let's adjust it to be as
+ * accurate as we can; adjust_monotonic_time() below
+ * will keep it monotonic. */
+ counter_usec_elapsed = ticks_elapsed * 1000;
+ base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
+ }
+ tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
+ tp->tv_usec = counter_usec_elapsed % 1000000;
+
+ } else {
+ /* We're just using GetTickCount(). */
+ tp->tv_sec = (time_t) (ticks / 1000);
+ tp->tv_usec = (ticks % 1000) * 1000;
+ }
+ adjust_monotonic_time(base, tp);
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_FALLBACK_MONOTONIC)
+/* =====
+ And if none of the other options work, let's just use gettimeofday(), and
+ ratchet it forward so that it acts like a monotonic timer, whether it
+ wants to or not.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int precise)
+{
+ memset(base, 0, sizeof(*base));
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+
+}
+#endif
diff --git a/libs/libevent/src/ht-internal.h b/libs/libevent/src/ht-internal.h
new file mode 100644
index 0000000000..50375bbaa9
--- /dev/null
+++ b/libs/libevent/src/ht-internal.h
@@ -0,0 +1,487 @@
+/* Copyright 2002 Christopher Clark */
+/* Copyright 2005-2012 Nick Mathewson */
+/* Copyright 2009-2012 Niels Provos and Nick Mathewson */
+/* See license at end. */
+
+/* Based on ideas by Christopher Clark and interfaces from Niels Provos. */
+
+#ifndef HT_INTERNAL_H_INCLUDED_
+#define HT_INTERNAL_H_INCLUDED_
+
+#define HT_HEAD(name, type) \
+ struct name { \
+ /* The hash table itself. */ \
+ struct type **hth_table; \
+ /* How long is the hash table? */ \
+ unsigned hth_table_length; \
+ /* How many elements does the table contain? */ \
+ unsigned hth_n_entries; \
+ /* How many elements will we allow in the table before resizing it? */ \
+ unsigned hth_load_limit; \
+ /* Position of hth_table_length in the primes table. */ \
+ int hth_prime_idx; \
+ }
+
+#define HT_INITIALIZER() \
+ { NULL, 0, 0, 0, -1 }
+
+#ifdef HT_NO_CACHE_HASH_VALUES
+#define HT_ENTRY(type) \
+ struct { \
+ struct type *hte_next; \
+ }
+#else
+#define HT_ENTRY(type) \
+ struct { \
+ struct type *hte_next; \
+ unsigned hte_hash; \
+ }
+#endif
+
+#define HT_EMPTY(head) \
+ ((head)->hth_n_entries == 0)
+
+/* How many elements in 'head'? */
+#define HT_SIZE(head) \
+ ((head)->hth_n_entries)
+
+/* Return memory usage for a hashtable (not counting the entries themselves) */
+#define HT_MEM_USAGE(head) \
+ (sizeof(*head) + (head)->hth_table_length * sizeof(void*))
+
+#define HT_FIND(name, head, elm) name##_HT_FIND((head), (elm))
+#define HT_INSERT(name, head, elm) name##_HT_INSERT((head), (elm))
+#define HT_REPLACE(name, head, elm) name##_HT_REPLACE((head), (elm))
+#define HT_REMOVE(name, head, elm) name##_HT_REMOVE((head), (elm))
+#define HT_START(name, head) name##_HT_START(head)
+#define HT_NEXT(name, head, elm) name##_HT_NEXT((head), (elm))
+#define HT_NEXT_RMV(name, head, elm) name##_HT_NEXT_RMV((head), (elm))
+#define HT_CLEAR(name, head) name##_HT_CLEAR(head)
+#define HT_INIT(name, head) name##_HT_INIT(head)
+/* Helper: */
+static inline unsigned
+ht_improve_hash_(unsigned h)
+{
+ /* Aim to protect against poor hash functions by adding logic here
+ * - logic taken from java 1.4 hashtable source */
+ h += ~(h << 9);
+ h ^= ((h >> 14) | (h << 18)); /* >>> */
+ h += (h << 4);
+ h ^= ((h >> 10) | (h << 22)); /* >>> */
+ return h;
+}
+
+#if 0
+/** Basic string hash function, from Java standard String.hashCode(). */
+static inline unsigned
+ht_string_hash_(const char *s)
+{
+ unsigned h = 0;
+ int m = 1;
+ while (*s) {
+ h += ((signed char)*s++)*m;
+ m = (m<<5)-1; /* m *= 31 */
+ }
+ return h;
+}
+#endif
+
+/** Basic string hash function, from Python's str.__hash__() */
+static inline unsigned
+ht_string_hash_(const char *s)
+{
+ unsigned h;
+ const unsigned char *cp = (const unsigned char *)s;
+ h = *cp << 7;
+ while (*cp) {
+ h = (1000003*h) ^ *cp++;
+ }
+ /* This conversion truncates the length of the string, but that's ok. */
+ h ^= (unsigned)(cp-(const unsigned char*)s);
+ return h;
+}
+
+#ifndef HT_NO_CACHE_HASH_VALUES
+#define HT_SET_HASH_(elm, field, hashfn) \
+ do { (elm)->field.hte_hash = hashfn(elm); } while (0)
+#define HT_SET_HASHVAL_(elm, field, val) \
+ do { (elm)->field.hte_hash = (val); } while (0)
+#define HT_ELT_HASH_(elm, field, hashfn) \
+ ((elm)->field.hte_hash)
+#else
+#define HT_SET_HASH_(elm, field, hashfn) \
+ ((void)0)
+#define HT_ELT_HASH_(elm, field, hashfn) \
+ (hashfn(elm))
+#define HT_SET_HASHVAL_(elm, field, val) \
+ ((void)0)
+#endif
+
+/* Helper: alias for the bucket containing 'elm'. */
+#define HT_BUCKET_(head, field, elm, hashfn) \
+ ((head)->hth_table[HT_ELT_HASH_(elm,field,hashfn) % head->hth_table_length])
+
+#define HT_FOREACH(x, name, head) \
+ for ((x) = HT_START(name, head); \
+ (x) != NULL; \
+ (x) = HT_NEXT(name, head, x))
+
+#define HT_PROTOTYPE(name, type, field, hashfn, eqfn) \
+ int name##_HT_GROW(struct name *ht, unsigned min_capacity); \
+ void name##_HT_CLEAR(struct name *ht); \
+ int name##_HT_REP_IS_BAD_(const struct name *ht); \
+ static inline void \
+ name##_HT_INIT(struct name *head) { \
+ head->hth_table_length = 0; \
+ head->hth_table = NULL; \
+ head->hth_n_entries = 0; \
+ head->hth_load_limit = 0; \
+ head->hth_prime_idx = -1; \
+ } \
+ /* Helper: returns a pointer to the right location in the table \
+ * 'head' to find or insert the element 'elm'. */ \
+ static inline struct type ** \
+ name##_HT_FIND_P_(struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ if (!head->hth_table) \
+ return NULL; \
+ p = &HT_BUCKET_(head, field, elm, hashfn); \
+ while (*p) { \
+ if (eqfn(*p, elm)) \
+ return p; \
+ p = &(*p)->field.hte_next; \
+ } \
+ return p; \
+ } \
+ /* Return a pointer to the element in the table 'head' matching 'elm', \
+ * or NULL if no such element exists */ \
+ static inline struct type * \
+ name##_HT_FIND(const struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ struct name *h = (struct name *) head; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(h, elm); \
+ return p ? *p : NULL; \
+ } \
+ /* Insert the element 'elm' into the table 'head'. Do not call this \
+ * function if the table might already contain a matching element. */ \
+ static inline void \
+ name##_HT_INSERT(struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
+ name##_HT_GROW(head, head->hth_n_entries+1); \
+ ++head->hth_n_entries; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = &HT_BUCKET_(head, field, elm, hashfn); \
+ elm->field.hte_next = *p; \
+ *p = elm; \
+ } \
+	/* Insert the element 'elm' into the table 'head'. If there is already \
+ * a matching element in the table, replace that element and return \
+ * it. */ \
+ static inline struct type * \
+ name##_HT_REPLACE(struct name *head, struct type *elm) \
+ { \
+ struct type **p, *r; \
+ if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
+ name##_HT_GROW(head, head->hth_n_entries+1); \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(head, elm); \
+ r = *p; \
+ *p = elm; \
+ if (r && (r!=elm)) { \
+ elm->field.hte_next = r->field.hte_next; \
+ r->field.hte_next = NULL; \
+ return r; \
+ } else { \
+ ++head->hth_n_entries; \
+ return NULL; \
+ } \
+ } \
+ /* Remove any element matching 'elm' from the table 'head'. If such \
+ * an element is found, return it; otherwise return NULL. */ \
+ static inline struct type * \
+ name##_HT_REMOVE(struct name *head, struct type *elm) \
+ { \
+ struct type **p, *r; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(head,elm); \
+ if (!p || !*p) \
+ return NULL; \
+ r = *p; \
+ *p = r->field.hte_next; \
+ r->field.hte_next = NULL; \
+ --head->hth_n_entries; \
+ return r; \
+ } \
+ /* Invoke the function 'fn' on every element of the table 'head', \
+ * using 'data' as its second argument. If the function returns \
+ * nonzero, remove the most recently examined element before invoking \
+ * the function again. */ \
+ static inline void \
+ name##_HT_FOREACH_FN(struct name *head, \
+ int (*fn)(struct type *, void *), \
+ void *data) \
+ { \
+ unsigned idx; \
+ struct type **p, **nextp, *next; \
+ if (!head->hth_table) \
+ return; \
+ for (idx=0; idx < head->hth_table_length; ++idx) { \
+ p = &head->hth_table[idx]; \
+ while (*p) { \
+ nextp = &(*p)->field.hte_next; \
+ next = *nextp; \
+ if (fn(*p, data)) { \
+ --head->hth_n_entries; \
+ *p = next; \
+ } else { \
+ p = nextp; \
+ } \
+ } \
+ } \
+ } \
+ /* Return a pointer to the first element in the table 'head', under \
+ * an arbitrary order. This order is stable under remove operations, \
+ * but not under others. If the table is empty, return NULL. */ \
+ static inline struct type ** \
+ name##_HT_START(struct name *head) \
+ { \
+ unsigned b = 0; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ /* Return the next element in 'head' after 'elm', under the arbitrary \
+ * order used by HT_START. If there are no more elements, return \
+ * NULL. If 'elm' is to be removed from the table, you must call \
+ * this function for the next value before you remove it. \
+ */ \
+ static inline struct type ** \
+ name##_HT_NEXT(struct name *head, struct type **elm) \
+ { \
+ if ((*elm)->field.hte_next) { \
+ return &(*elm)->field.hte_next; \
+ } else { \
+ unsigned b = (HT_ELT_HASH_(*elm, field, hashfn) % head->hth_table_length)+1; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ } \
+ static inline struct type ** \
+ name##_HT_NEXT_RMV(struct name *head, struct type **elm) \
+ { \
+ unsigned h = HT_ELT_HASH_(*elm, field, hashfn); \
+ *elm = (*elm)->field.hte_next; \
+ --head->hth_n_entries; \
+ if (*elm) { \
+ return elm; \
+ } else { \
+ unsigned b = (h % head->hth_table_length)+1; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ }
+
+#define HT_GENERATE(name, type, field, hashfn, eqfn, load, mallocfn, \
+ reallocfn, freefn) \
+ static unsigned name##_PRIMES[] = { \
+ 53, 97, 193, 389, \
+ 769, 1543, 3079, 6151, \
+ 12289, 24593, 49157, 98317, \
+ 196613, 393241, 786433, 1572869, \
+ 3145739, 6291469, 12582917, 25165843, \
+ 50331653, 100663319, 201326611, 402653189, \
+ 805306457, 1610612741 \
+ }; \
+ static unsigned name##_N_PRIMES = \
+ (unsigned)(sizeof(name##_PRIMES)/sizeof(name##_PRIMES[0])); \
+ /* Expand the internal table of 'head' until it is large enough to \
+ * hold 'size' elements. Return 0 on success, -1 on allocation \
+ * failure. */ \
+ int \
+ name##_HT_GROW(struct name *head, unsigned size) \
+ { \
+ unsigned new_len, new_load_limit; \
+ int prime_idx; \
+ struct type **new_table; \
+ if (head->hth_prime_idx == (int)name##_N_PRIMES - 1) \
+ return 0; \
+ if (head->hth_load_limit > size) \
+ return 0; \
+ prime_idx = head->hth_prime_idx; \
+ do { \
+ new_len = name##_PRIMES[++prime_idx]; \
+ new_load_limit = (unsigned)(load*new_len); \
+ } while (new_load_limit <= size && \
+ prime_idx < (int)name##_N_PRIMES); \
+ if ((new_table = mallocfn(new_len*sizeof(struct type*)))) { \
+ unsigned b; \
+ memset(new_table, 0, new_len*sizeof(struct type*)); \
+ for (b = 0; b < head->hth_table_length; ++b) { \
+ struct type *elm, *next; \
+ unsigned b2; \
+ elm = head->hth_table[b]; \
+ while (elm) { \
+ next = elm->field.hte_next; \
+ b2 = HT_ELT_HASH_(elm, field, hashfn) % new_len; \
+ elm->field.hte_next = new_table[b2]; \
+ new_table[b2] = elm; \
+ elm = next; \
+ } \
+ } \
+ if (head->hth_table) \
+ freefn(head->hth_table); \
+ head->hth_table = new_table; \
+ } else { \
+ unsigned b, b2; \
+ new_table = reallocfn(head->hth_table, new_len*sizeof(struct type*)); \
+ if (!new_table) return -1; \
+ memset(new_table + head->hth_table_length, 0, \
+ (new_len - head->hth_table_length)*sizeof(struct type*)); \
+ for (b=0; b < head->hth_table_length; ++b) { \
+ struct type *e, **pE; \
+ for (pE = &new_table[b], e = *pE; e != NULL; e = *pE) { \
+ b2 = HT_ELT_HASH_(e, field, hashfn) % new_len; \
+ if (b2 == b) { \
+ pE = &e->field.hte_next; \
+ } else { \
+ *pE = e->field.hte_next; \
+ e->field.hte_next = new_table[b2]; \
+ new_table[b2] = e; \
+ } \
+ } \
+ } \
+ head->hth_table = new_table; \
+ } \
+ head->hth_table_length = new_len; \
+ head->hth_prime_idx = prime_idx; \
+ head->hth_load_limit = new_load_limit; \
+ return 0; \
+ } \
+ /* Free all storage held by 'head'. Does not free 'head' itself, or \
+ * individual elements. */ \
+ void \
+ name##_HT_CLEAR(struct name *head) \
+ { \
+ if (head->hth_table) \
+ freefn(head->hth_table); \
+ name##_HT_INIT(head); \
+ } \
+ /* Debugging helper: return false iff the representation of 'head' is \
+ * internally consistent. */ \
+ int \
+ name##_HT_REP_IS_BAD_(const struct name *head) \
+ { \
+ unsigned n, i; \
+ struct type *elm; \
+ if (!head->hth_table_length) { \
+ if (!head->hth_table && !head->hth_n_entries && \
+ !head->hth_load_limit && head->hth_prime_idx == -1) \
+ return 0; \
+ else \
+ return 1; \
+ } \
+ if (!head->hth_table || head->hth_prime_idx < 0 || \
+ !head->hth_load_limit) \
+ return 2; \
+ if (head->hth_n_entries > head->hth_load_limit) \
+ return 3; \
+ if (head->hth_table_length != name##_PRIMES[head->hth_prime_idx]) \
+ return 4; \
+ if (head->hth_load_limit != (unsigned)(load*head->hth_table_length)) \
+ return 5; \
+ for (n = i = 0; i < head->hth_table_length; ++i) { \
+ for (elm = head->hth_table[i]; elm; elm = elm->field.hte_next) { \
+ if (HT_ELT_HASH_(elm, field, hashfn) != hashfn(elm)) \
+ return 1000 + i; \
+ if ((HT_ELT_HASH_(elm, field, hashfn) % head->hth_table_length) != i) \
+ return 10000 + i; \
+ ++n; \
+ } \
+ } \
+ if (n != head->hth_n_entries) \
+ return 6; \
+ return 0; \
+ }
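+
+/* Worked example (illustrative): with a load factor of 0.5, the initial
+ * 53-bucket table accepts up to hth_load_limit = 26 entries.  Inserting a
+ * 27th entry triggers name##_HT_GROW(), which steps to the next prime in
+ * name##_PRIMES (97), allocates the new bucket array, rehashes every
+ * element into it, and raises the load limit to 48. */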
+
+/** Implements an over-optimized "find and insert if absent" block;
+ * not meant for direct usage by typical code, or usage outside the critical
+ * path.*/
+#define HT_FIND_OR_INSERT_(name, field, hashfn, head, eltype, elm, var, y, n) \
+ { \
+ struct name *var##_head_ = head; \
+ struct eltype **var; \
+ if (!var##_head_->hth_table || \
+ var##_head_->hth_n_entries >= var##_head_->hth_load_limit) \
+ name##_HT_GROW(var##_head_, var##_head_->hth_n_entries+1); \
+ HT_SET_HASH_((elm), field, hashfn); \
+ var = name##_HT_FIND_P_(var##_head_, (elm)); \
+ if (*var) { \
+ y; \
+ } else { \
+ n; \
+ } \
+ }
+#define HT_FOI_INSERT_(field, head, elm, newent, var) \
+ { \
+ HT_SET_HASHVAL_(newent, field, (elm)->field.hte_hash); \
+ newent->field.hte_next = NULL; \
+ *var = newent; \
+ ++((head)->hth_n_entries); \
+ }
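+
+/* Example (illustrative sketch; 'my_map', 'struct foo', its 'node' field
+ * and 'foo_hash' are hypothetical names, assumed to have been set up with
+ * the HT_HEAD/HT_ENTRY/HT_PROTOTYPE/HT_GENERATE macros above): insert
+ * 'elm' unless an equal entry is already present, and return the entry
+ * that ends up in the table.
+ *
+ *   static struct foo *
+ *   foo_insert_if_absent(struct my_map *map, struct foo *elm)
+ *   {
+ *           struct foo *result;
+ *           HT_FIND_OR_INSERT_(my_map, node, foo_hash, map, foo, elm, ptr,
+ *           {
+ *                   result = *ptr;
+ *           },
+ *           {
+ *                   HT_FOI_INSERT_(node, map, elm, elm, ptr)
+ *                   result = elm;
+ *           });
+ *           return result;
+ *   }
+ */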
+
+/*
+ * Copyright 2005, Nick Mathewson. Implementation logic is adapted from code
+ * by Christopher Clark, retrofit to allow drop-in memory management, and to
+ * use the same interface as Niels Provos's tree.h. This is probably still
+ * a derived work, so the original license below still applies.
+ *
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#endif
+
diff --git a/libs/libevent/src/http-internal.h b/libs/libevent/src/http-internal.h
new file mode 100644
index 0000000000..ba6e49ef9b
--- /dev/null
+++ b/libs/libevent/src/http-internal.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2001-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * This header file contains definitions for dealing with HTTP requests
+ * that are internal to libevent. As a user of the library, you should not
+ * need to know about these.
+ */
+
+#ifndef HTTP_INTERNAL_H_INCLUDED_
+#define HTTP_INTERNAL_H_INCLUDED_
+
+#include "event2/event_struct.h"
+#include "util-internal.h"
+#include "defer-internal.h"
+
+#define HTTP_CONNECT_TIMEOUT 45
+#define HTTP_WRITE_TIMEOUT 50
+#define HTTP_READ_TIMEOUT 50
+
+#define HTTP_PREFIX "http://"
+#define HTTP_DEFAULTPORT 80
+
+enum message_read_status {
+ ALL_DATA_READ = 1,
+ MORE_DATA_EXPECTED = 0,
+ DATA_CORRUPTED = -1,
+ REQUEST_CANCELED = -2,
+ DATA_TOO_LONG = -3
+};
+
+struct evbuffer;
+struct addrinfo;
+struct evhttp_request;
+
+/* Indicates an unknown request method. */
+#define EVHTTP_REQ_UNKNOWN_ (1<<15)
+
+enum evhttp_connection_state {
+	EVCON_DISCONNECTED,	/**< not currently connected and not trying to connect */
+	EVCON_CONNECTING,	/**< currently trying to connect */
+ EVCON_IDLE, /**< connection is established */
+ EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
+ **< Status-Line (outgoing conn) */
+ EVCON_READING_HEADERS, /**< reading request/response headers */
+ EVCON_READING_BODY, /**< reading request/response body */
+ EVCON_READING_TRAILER, /**< reading request/response chunked trailer */
+ EVCON_WRITING /**< writing request/response headers/body */
+};
+
+struct event_base;
+
+/* A client or server connection. */
+struct evhttp_connection {
+ /* we use this tailq only if this connection was created for an http
+ * server */
+ TAILQ_ENTRY(evhttp_connection) next;
+
+ evutil_socket_t fd;
+ struct bufferevent *bufev;
+
+ struct event retry_ev; /* for retrying connects */
+
+ char *bind_address; /* address to use for binding the src */
+ unsigned short bind_port; /* local port for binding the src */
+
+ char *address; /* address to connect to */
+ unsigned short port;
+
+ size_t max_headers_size;
+ ev_uint64_t max_body_size;
+
+ int flags;
+#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
+#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
+#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
+/* set when we want to auto free the connection */
+#define EVHTTP_CON_AUTOFREE EVHTTP_CON_PUBLIC_FLAGS_END
+
+ struct timeval timeout; /* timeout for events */
+ int retry_cnt; /* retry count */
+ int retry_max; /* maximum number of retries */
+	struct timeval initial_retry_timeout; /* How long to wait after the
+					       * first failed attempt before
+					       * retrying */
+
+ enum evhttp_connection_state state;
+
+ /* for server connections, the http server they are connected with */
+ struct evhttp *http_server;
+
+ TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
+
+ void (*cb)(struct evhttp_connection *, void *);
+ void *cb_arg;
+
+ void (*closecb)(struct evhttp_connection *, void *);
+ void *closecb_arg;
+
+ struct event_callback read_more_deferred_cb;
+
+ struct event_base *base;
+ struct evdns_base *dns_base;
+ int ai_family;
+};
+
+/* A callback for an http server */
+struct evhttp_cb {
+ TAILQ_ENTRY(evhttp_cb) next;
+
+ char *what;
+
+ void (*cb)(struct evhttp_request *req, void *);
+ void *cbarg;
+};
+
+/* both the http server as well as the rpc system need to queue connections */
+TAILQ_HEAD(evconq, evhttp_connection);
+
+/* each bound socket is stored in one of these */
+struct evhttp_bound_socket {
+ TAILQ_ENTRY(evhttp_bound_socket) next;
+
+ struct evconnlistener *listener;
+};
+
+/* server alias list item. */
+struct evhttp_server_alias {
+ TAILQ_ENTRY(evhttp_server_alias) next;
+
+ char *alias; /* the server alias. */
+};
+
+struct evhttp {
+ /* Next vhost, if this is a vhost. */
+ TAILQ_ENTRY(evhttp) next_vhost;
+
+ /* All listeners for this host */
+ TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
+
+ TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
+
+ /* All live connections on this host. */
+ struct evconq connections;
+
+ TAILQ_HEAD(vhostsq, evhttp) virtualhosts;
+
+ TAILQ_HEAD(aliasq, evhttp_server_alias) aliases;
+
+ /* NULL if this server is not a vhost */
+ char *vhost_pattern;
+
+ struct timeval timeout;
+
+ size_t default_max_headers_size;
+ ev_uint64_t default_max_body_size;
+ const char *default_content_type;
+
+ /* Bitmask of all HTTP methods that we accept and pass to user
+ * callbacks. */
+ ev_uint16_t allowed_methods;
+
+ /* Fallback callback if all the other callbacks for this connection
+ don't match. */
+ void (*gencb)(struct evhttp_request *req, void *);
+ void *gencbarg;
+ struct bufferevent* (*bevcb)(struct event_base *, void *);
+ void *bevcbarg;
+
+ struct event_base *base;
+};
+
+/* XXX most of these functions could be static. */
+
+/* resets the connection; can be reused for more requests */
+void evhttp_connection_reset_(struct evhttp_connection *);
+
+/* connects if necessary */
+int evhttp_connection_connect_(struct evhttp_connection *);
+
+enum evhttp_request_error;
+/* notifies the current request that it failed; resets connection */
+void evhttp_connection_fail_(struct evhttp_connection *,
+ enum evhttp_request_error error);
+
+enum message_read_status;
+
+enum message_read_status evhttp_parse_firstline_(struct evhttp_request *, struct evbuffer*);
+enum message_read_status evhttp_parse_headers_(struct evhttp_request *, struct evbuffer*);
+
+void evhttp_start_read_(struct evhttp_connection *);
+
+/* sends an HTML response with the data in the buffer */
+void evhttp_response_code_(struct evhttp_request *, int, const char *);
+void evhttp_send_page_(struct evhttp_request *, struct evbuffer *);
+
+int evhttp_decode_uri_internal(const char *uri, size_t length,
+ char *ret, int decode_plus);
+
+#endif /* HTTP_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/http.c b/libs/libevent/src/http.c
new file mode 100644
index 0000000000..fd7ce3cbf2
--- /dev/null
+++ b/libs/libevent/src/http.c
@@ -0,0 +1,4892 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_IOCCOM_H
+#include <sys/ioccom.h>
+#endif
+#ifdef EVENT__HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/stat.h>
+#else
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#include <sys/queue.h>
+
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#ifdef EVENT__HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+#include <signal.h>
+#include <time.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#undef timeout_pending
+#undef timeout_initialized
+
+#include "strlcpy-internal.h"
+#include "event2/http.h"
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/http_struct.h"
+#include "event2/http_compat.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "log-internal.h"
+#include "util-internal.h"
+#include "http-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+
+#ifndef EVENT__HAVE_GETNAMEINFO
+#define NI_MAXSERV 32
+#define NI_MAXHOST 1025
+
+#ifndef NI_NUMERICHOST
+#define NI_NUMERICHOST 1
+#endif
+
+#ifndef NI_NUMERICSERV
+#define NI_NUMERICSERV 2
+#endif
+
+static int
+fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host,
+ size_t hostlen, char *serv, size_t servlen, int flags)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if (serv != NULL) {
+ char tmpserv[16];
+ evutil_snprintf(tmpserv, sizeof(tmpserv),
+ "%d", ntohs(sin->sin_port));
+ if (strlcpy(serv, tmpserv, servlen) >= servlen)
+ return (-1);
+ }
+
+ if (host != NULL) {
+ if (flags & NI_NUMERICHOST) {
+ if (strlcpy(host, inet_ntoa(sin->sin_addr),
+ hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ } else {
+ struct hostent *hp;
+ hp = gethostbyaddr((char *)&sin->sin_addr,
+ sizeof(struct in_addr), AF_INET);
+ if (hp == NULL)
+ return (-2);
+
+ if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ }
+ }
+ return (0);
+}
+
+#endif
+
+#define REQ_VERSION_BEFORE(req, major_v, minor_v) \
+ ((req)->major < (major_v) || \
+ ((req)->major == (major_v) && (req)->minor < (minor_v)))
+
+#define REQ_VERSION_ATLEAST(req, major_v, minor_v) \
+ ((req)->major > (major_v) || \
+ ((req)->major == (major_v) && (req)->minor >= (minor_v)))
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+extern int debug;
+
+static evutil_socket_t bind_socket_ai(struct evutil_addrinfo *, int reuse);
+static evutil_socket_t bind_socket(const char *, ev_uint16_t, int reuse);
+static void name_from_addr(struct sockaddr *, ev_socklen_t, char **, char **);
+static int evhttp_associate_new_request_with_connection(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_start_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_stop_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_request_dispatch(struct evhttp_connection* evcon);
+static void evhttp_read_firstline(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static void evhttp_read_header(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static int evhttp_add_header_internal(struct evkeyvalq *headers,
+ const char *key, const char *value);
+static const char *evhttp_response_phrase_internal(int code);
+static void evhttp_get_request(struct evhttp *, evutil_socket_t, struct sockaddr *, ev_socklen_t);
+static void evhttp_write_buffer(struct evhttp_connection *,
+ void (*)(struct evhttp_connection *, void *), void *);
+static void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
+
+/* callbacks for bufferevent */
+static void evhttp_read_cb(struct bufferevent *, void *);
+static void evhttp_write_cb(struct bufferevent *, void *);
+static void evhttp_error_cb(struct bufferevent *bufev, short what, void *arg);
+static int evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname);
+
+#ifndef EVENT__HAVE_STRSEP
+/* strsep replacement for platforms that lack it. Only works if
+ * del is one character long. */
+static char *
+strsep(char **s, const char *del)
+{
+ char *d, *tok;
+ EVUTIL_ASSERT(strlen(del) == 1);
+ if (!s || !*s)
+ return NULL;
+ tok = *s;
+ d = strstr(tok, del);
+ if (d) {
+ *d = '\0';
+ *s = d + 1;
+ } else
+ *s = NULL;
+ return tok;
+}
+#endif
+
+static size_t
+html_replace(const char ch, const char **escaped)
+{
+ switch (ch) {
+ case '<':
+ *escaped = "&lt;";
+ return 4;
+ case '>':
+ *escaped = "&gt;";
+ return 4;
+ case '"':
+ *escaped = "&quot;";
+ return 6;
+ case '\'':
+ *escaped = "&#039;";
+ return 6;
+ case '&':
+ *escaped = "&amp;";
+ return 5;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+/*
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp;, respectively.
+ *
+ * The returned string needs to be freed by the caller.
+ */
+
+char *
+evhttp_htmlescape(const char *html)
+{
+ size_t i;
+ size_t new_size = 0, old_size = 0;
+ char *escaped_html, *p;
+
+ if (html == NULL)
+ return (NULL);
+
+ old_size = strlen(html);
+ for (i = 0; i < old_size; ++i) {
+ const char *replaced = NULL;
+ const size_t replace_size = html_replace(html[i], &replaced);
+ if (replace_size > EV_SIZE_MAX - new_size) {
+ event_warn("%s: html_replace overflow", __func__);
+ return (NULL);
+ }
+ new_size += replace_size;
+ }
+
+ if (new_size == EV_SIZE_MAX)
+ return (NULL);
+ p = escaped_html = mm_malloc(new_size + 1);
+ if (escaped_html == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(new_size + 1));
+ return (NULL);
+ }
+ for (i = 0; i < old_size; ++i) {
+ const char *replaced = &html[i];
+ const size_t len = html_replace(html[i], &replaced);
+ memcpy(p, replaced, len);
+ p += len;
+ }
+
+ *p = '\0';
+
+ return (escaped_html);
+}
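+
+/* Example (illustrative):
+ *
+ *   char *escaped = evhttp_htmlescape("<a href=\"x\">Tom & Jerry</a>");
+ *
+ * yields "&lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;"; the
+ * caller releases it with free() under the default allocators (or the
+ * matching custom free function if one was installed). */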
+
+/** Given an evhttp_cmd_type, returns a constant string containing the
+ * equivalent HTTP command, or NULL if the evhttp_cmd_type is
+ * unrecognized. */
+static const char *
+evhttp_method(enum evhttp_cmd_type type)
+{
+ const char *method;
+
+ switch (type) {
+ case EVHTTP_REQ_GET:
+ method = "GET";
+ break;
+ case EVHTTP_REQ_POST:
+ method = "POST";
+ break;
+ case EVHTTP_REQ_HEAD:
+ method = "HEAD";
+ break;
+ case EVHTTP_REQ_PUT:
+ method = "PUT";
+ break;
+ case EVHTTP_REQ_DELETE:
+ method = "DELETE";
+ break;
+ case EVHTTP_REQ_OPTIONS:
+ method = "OPTIONS";
+ break;
+ case EVHTTP_REQ_TRACE:
+ method = "TRACE";
+ break;
+ case EVHTTP_REQ_CONNECT:
+ method = "CONNECT";
+ break;
+ case EVHTTP_REQ_PATCH:
+ method = "PATCH";
+ break;
+ default:
+ method = NULL;
+ break;
+ }
+
+ return (method);
+}
+
+/**
+ * Determines if a response should have a body.
+ * Follows the rules in RFC 2616 section 4.3.
+ * @return 1 if the response MUST have a body; 0 if the response MUST NOT have
+ * a body.
+ */
+static int
+evhttp_response_needs_body(struct evhttp_request *req)
+{
+ return (req->response_code != HTTP_NOCONTENT &&
+ req->response_code != HTTP_NOTMODIFIED &&
+ (req->response_code < 100 || req->response_code >= 200) &&
+ req->type != EVHTTP_REQ_HEAD);
+}
+
+/** Helper: called after we've added some data to an evcon's bufferevent's
+ * output buffer. Sets the evconn's writing-is-done callback, and puts
+ * the bufferevent into writing mode.
+ */
+static void
+evhttp_write_buffer(struct evhttp_connection *evcon,
+ void (*cb)(struct evhttp_connection *, void *), void *arg)
+{
+ event_debug(("%s: preparing to write buffer\n", __func__));
+
+ /* Set call back */
+ evcon->cb = cb;
+ evcon->cb_arg = arg;
+
+ /* Disable the read callback: we don't actually care about data;
+ * we only care about close detection. (We don't disable reading,
+ * since we *do* want to learn about any close events.) */
+ bufferevent_setcb(evcon->bufev,
+ NULL, /*read*/
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+}
+
+static void
+evhttp_send_continue_done(struct evhttp_connection *evcon, void *arg)
+{
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+}
+
+static void
+evhttp_send_continue(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "HTTP/%d.%d 100 Continue\r\n\r\n",
+ req->major, req->minor);
+ evcon->cb = evhttp_send_continue_done;
+ evcon->cb_arg = NULL;
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+}
+
+/** Helper: returns true iff evconn is in any connected state. */
+static int
+evhttp_connected(struct evhttp_connection *evcon)
+{
+ switch (evcon->state) {
+ case EVCON_DISCONNECTED:
+ case EVCON_CONNECTING:
+ return (0);
+ case EVCON_IDLE:
+ case EVCON_READING_FIRSTLINE:
+ case EVCON_READING_HEADERS:
+ case EVCON_READING_BODY:
+ case EVCON_READING_TRAILER:
+ case EVCON_WRITING:
+ default:
+ return (1);
+ }
+}
+
+/* Creates the headers needed for an outgoing HTTP request, adds them to
+ * the request's header list, and writes the request line to the
+ * connection's output buffer.
+ */
+static void
+evhttp_make_header_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ const char *method;
+
+ evhttp_remove_header(req->output_headers, "Proxy-Connection");
+
+ /* Generate request line */
+ method = evhttp_method(req->type);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "%s %s HTTP/%d.%d\r\n",
+ method, req->uri, req->major, req->minor);
+
+ /* Add the content length on a post or put request if missing */
+ if ((req->type == EVHTTP_REQ_POST || req->type == EVHTTP_REQ_PUT) &&
+ evhttp_find_header(req->output_headers, "Content-Length") == NULL){
+ char size[22];
+ evutil_snprintf(size, sizeof(size), EV_SIZE_FMT,
+ EV_SIZE_ARG(evbuffer_get_length(req->output_buffer)));
+ evhttp_add_header(req->output_headers, "Content-Length", size);
+ }
+}
+
+/** Return true if the list of headers in 'headers', interpreted with respect
+ * to flags, means that we should send a "connection: close" when the request
+ * is done. */
+static int
+evhttp_is_connection_close(int flags, struct evkeyvalq* headers)
+{
+ if (flags & EVHTTP_PROXY_REQUEST) {
+ /* proxy connection */
+ const char *connection = evhttp_find_header(headers, "Proxy-Connection");
+ return (connection == NULL || evutil_ascii_strcasecmp(connection, "keep-alive") != 0);
+ } else {
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL && evutil_ascii_strcasecmp(connection, "close") == 0);
+ }
+}
+static int
+evhttp_is_request_connection_close(struct evhttp_request *req)
+{
+ return
+ evhttp_is_connection_close(req->flags, req->input_headers) ||
+ evhttp_is_connection_close(req->flags, req->output_headers);
+}
+
+/* Return true iff 'headers' contains 'Connection: keep-alive' */
+static int
+evhttp_is_connection_keepalive(struct evkeyvalq* headers)
+{
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL
+ && evutil_ascii_strncasecmp(connection, "keep-alive", 10) == 0);
+}
+
+/* Add a correct "Date" header to headers, unless it already has one. */
+static void
+evhttp_maybe_add_date_header(struct evkeyvalq *headers)
+{
+ if (evhttp_find_header(headers, "Date") == NULL) {
+ char date[50];
+#ifndef _WIN32
+ struct tm cur;
+#endif
+ struct tm *cur_p;
+ time_t t = time(NULL);
+#ifdef _WIN32
+ cur_p = gmtime(&t);
+#else
+ gmtime_r(&t, &cur);
+ cur_p = &cur;
+#endif
+ if (strftime(date, sizeof(date),
+ "%a, %d %b %Y %H:%M:%S GMT", cur_p) != 0) {
+ evhttp_add_header(headers, "Date", date);
+ }
+ }
+}
+
+/* Add a "Content-Length" header with value 'content_length' to headers,
+ * unless it already has a content-length or transfer-encoding header. */
+static void
+evhttp_maybe_add_content_length_header(struct evkeyvalq *headers,
+ size_t content_length)
+{
+ if (evhttp_find_header(headers, "Transfer-Encoding") == NULL &&
+ evhttp_find_header(headers, "Content-Length") == NULL) {
+ char len[22];
+ evutil_snprintf(len, sizeof(len), EV_SIZE_FMT,
+ EV_SIZE_ARG(content_length));
+ evhttp_add_header(headers, "Content-Length", len);
+ }
+}
+
+/*
+ * Create the headers needed for an HTTP reply in req->output_headers,
+ * and write the first line of the HTTP response for req to evcon's output buffer.
+ */
+static void
+evhttp_make_header_response(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ int is_keepalive = evhttp_is_connection_keepalive(req->input_headers);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "HTTP/%d.%d %d %s\r\n",
+ req->major, req->minor, req->response_code,
+ req->response_code_line);
+
+ if (req->major == 1) {
+ if (req->minor >= 1)
+ evhttp_maybe_add_date_header(req->output_headers);
+
+ /*
+		 * if the protocol is 1.0 and the connection was keep-alive,
+ * we need to add a keep-alive header, too.
+ */
+ if (req->minor == 0 && is_keepalive)
+ evhttp_add_header(req->output_headers,
+ "Connection", "keep-alive");
+
+ if ((req->minor >= 1 || is_keepalive) &&
+ evhttp_response_needs_body(req)) {
+ /*
+ * we need to add the content length if the
+			 * user did not give it; this is required for
+ * persistent connections to work.
+ */
+ evhttp_maybe_add_content_length_header(
+ req->output_headers,
+ evbuffer_get_length(req->output_buffer));
+ }
+ }
+
+ /* Potentially add headers for unidentified content. */
+ if (evhttp_response_needs_body(req)) {
+ if (evhttp_find_header(req->output_headers,
+ "Content-Type") == NULL
+ && evcon->http_server->default_content_type) {
+ evhttp_add_header(req->output_headers,
+ "Content-Type",
+ evcon->http_server->default_content_type);
+ }
+ }
+
+ /* if the request asked for a close, we send a close, too */
+ if (evhttp_is_connection_close(req->flags, req->input_headers)) {
+ evhttp_remove_header(req->output_headers, "Connection");
+ if (!(req->flags & EVHTTP_PROXY_REQUEST))
+ evhttp_add_header(req->output_headers, "Connection", "close");
+ evhttp_remove_header(req->output_headers, "Proxy-Connection");
+ }
+}
+
+/** Generate all headers appropriate for sending the http request in req (or
+ * the response, if we're sending a response), and write them to evcon's
+ * bufferevent. Also writes all data from req->output_buffer */
+static void
+evhttp_make_header(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evkeyval *header;
+ struct evbuffer *output = bufferevent_get_output(evcon->bufev);
+
+ /*
+ * Depending if this is a HTTP request or response, we might need to
+ * add some new headers or remove existing headers.
+ */
+ if (req->kind == EVHTTP_REQUEST) {
+ evhttp_make_header_request(evcon, req);
+ } else {
+ evhttp_make_header_response(evcon, req);
+ }
+
+ TAILQ_FOREACH(header, req->output_headers, next) {
+ evbuffer_add_printf(output, "%s: %s\r\n",
+ header->key, header->value);
+ }
+ evbuffer_add(output, "\r\n", 2);
+
+ if (evbuffer_get_length(req->output_buffer) > 0) {
+ /*
+ * For a request, we add the POST data, for a reply, this
+ * is the regular data.
+ */
+ /* XXX We might want to support waiting (a limited amount of
+ time) for a continue status line from the server before
+ sending POST/PUT message bodies. */
+ evbuffer_add_buffer(output, req->output_buffer);
+ }
+}
+
+void
+evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon,
+ ev_ssize_t new_max_headers_size)
+{
+ if (new_max_headers_size<0)
+ evcon->max_headers_size = EV_SIZE_MAX;
+ else
+ evcon->max_headers_size = new_max_headers_size;
+}
+void
+evhttp_connection_set_max_body_size(struct evhttp_connection* evcon,
+ ev_ssize_t new_max_body_size)
+{
+ if (new_max_body_size<0)
+ evcon->max_body_size = EV_UINT64_MAX;
+ else
+ evcon->max_body_size = new_max_body_size;
+}
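+
+/* Example (illustrative; 'evcon' is assumed to be a connection obtained
+ * from evhttp_connection_base_new()): cap headers at 8 KiB and bodies at
+ * 1 MiB.  A negative value makes the corresponding limit effectively
+ * unlimited (EV_SIZE_MAX / EV_UINT64_MAX), as above.
+ *
+ *   evhttp_connection_set_max_headers_size(evcon, 8 * 1024);
+ *   evhttp_connection_set_max_body_size(evcon, 1024 * 1024);
+ */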
+
+static int
+evhttp_connection_incoming_fail(struct evhttp_request *req,
+ enum evhttp_request_error error)
+{
+ switch (error) {
+ case EVREQ_HTTP_TIMEOUT:
+ case EVREQ_HTTP_EOF:
+ /*
+ * these are cases in which we probably should just
+ * close the connection and not send a reply. this
+ * case may happen when a browser keeps a persistent
+		 * connection open and we time out on the read. when
+		 * the request is still being used for sending, we
+		 * need to disassociate it from the connection here.
+ */
+ if (!req->userdone) {
+ /* remove it so that it will not be freed */
+ TAILQ_REMOVE(&req->evcon->requests, req, next);
+ /* indicate that this request no longer has a
+ * connection object
+ */
+ req->evcon = NULL;
+ }
+ return (-1);
+ case EVREQ_HTTP_INVALID_HEADER:
+ case EVREQ_HTTP_BUFFER_ERROR:
+ case EVREQ_HTTP_REQUEST_CANCEL:
+ case EVREQ_HTTP_DATA_TOO_LONG:
+ default: /* xxx: probably should just error on default */
+ /* the callback looks at the uri to determine errors */
+ if (req->uri) {
+ mm_free(req->uri);
+ req->uri = NULL;
+ }
+ if (req->uri_elems) {
+ evhttp_uri_free(req->uri_elems);
+ req->uri_elems = NULL;
+ }
+
+ /*
+		 * the callback needs to send a reply; once the reply has
+		 * been sent, the connection should get freed.
+ */
+ (*req->cb)(req, req->cb_arg);
+ }
+
+ return (0);
+}
+
+/* Free the request, unless the user has taken ownership of it with
+ * evhttp_request_own(). */
+static inline void
+evhttp_request_free_auto(struct evhttp_request *req)
+{
+ if (!(req->flags & EVHTTP_USER_OWNED))
+ evhttp_request_free(req);
+}
+
+static void
+evhttp_request_free_(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ evhttp_request_free_auto(req);
+}
+
+/* Called when evcon has experienced a (non-recoverable? -NM) error, as
+ * given in error. If it's an outgoing connection, reset the connection,
+ * retry any pending requests, and inform the user. If it's incoming,
+ * delegates to evhttp_connection_incoming_fail(). */
+void
+evhttp_connection_fail_(struct evhttp_connection *evcon,
+ enum evhttp_request_error error)
+{
+ const int errsave = EVUTIL_SOCKET_ERROR();
+ struct evhttp_request* req = TAILQ_FIRST(&evcon->requests);
+ void (*cb)(struct evhttp_request *, void *);
+ void *cb_arg;
+ void (*error_cb)(enum evhttp_request_error, void *);
+ void *error_cb_arg;
+ EVUTIL_ASSERT(req != NULL);
+
+ bufferevent_disable(evcon->bufev, EV_READ|EV_WRITE);
+
+ if (evcon->flags & EVHTTP_CON_INCOMING) {
+ /*
+ * for incoming requests, there are two different
+ * failure cases. it's either a network level error
+ * or an http layer error. for problems on the network
+ * layer like timeouts we just drop the connections.
+ * For HTTP problems, we might have to send back a
+ * reply before the connection can be freed.
+ */
+ if (evhttp_connection_incoming_fail(req, error) == -1)
+ evhttp_connection_free(evcon);
+ return;
+ }
+
+ error_cb = req->error_cb;
+ error_cb_arg = req->cb_arg;
+ /* when the request was canceled, the callback is not executed */
+ if (error != EVREQ_HTTP_REQUEST_CANCEL) {
+ /* save the callback for later; the cb might free our object */
+ cb = req->cb;
+ cb_arg = req->cb_arg;
+ } else {
+ cb = NULL;
+ cb_arg = NULL;
+ }
+
+ /* do not fail all requests; the next request is going to get
+	 * sent over a new connection. when a user cancels a request,
+ * all other pending requests should be processed as normal
+ */
+ evhttp_request_free_(evcon, req);
+
+ /* reset the connection */
+ evhttp_connection_reset_(evcon);
+
+ /* We are trying the next request that was queued on us */
+ if (TAILQ_FIRST(&evcon->requests) != NULL)
+ evhttp_connection_connect_(evcon);
+
+ /* The call to evhttp_connection_reset_ overwrote errno.
+ * Let's restore the original errno, so that the user's
+ * callback can have a better idea of what the error was.
+ */
+ EVUTIL_SET_SOCKET_ERROR(errsave);
+
+ /* inform the user */
+ if (error_cb != NULL)
+ error_cb(error, error_cb_arg);
+ if (cb != NULL)
+ (*cb)(NULL, cb_arg);
+}
+
+/* Bufferevent callback: invoked when any data has been written from an
+ * http connection's bufferevent */
+static void
+evhttp_write_cb(struct bufferevent *bufev, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+
+ /* Activate our call back */
+ if (evcon->cb != NULL)
+ (*evcon->cb)(evcon, evcon->cb_arg);
+}
+
+/**
+ * Advance the connection state.
+ * - If this is an outgoing connection, we've just processed the response;
+ * idle or close the connection.
+ * - If this is an incoming connection, we've just processed the request;
+ * respond.
+ */
+static void
+evhttp_connection_done(struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ int con_outgoing = evcon->flags & EVHTTP_CON_OUTGOING;
+ int free_evcon = 0;
+
+ if (con_outgoing) {
+ /* idle or close the connection */
+ int need_close = evhttp_is_request_connection_close(req);
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ req->evcon = NULL;
+
+ evcon->state = EVCON_IDLE;
+
+ /* check if we got asked to close the connection */
+ if (need_close)
+ evhttp_connection_reset_(evcon);
+
+ if (TAILQ_FIRST(&evcon->requests) != NULL) {
+ /*
+ * We have more requests; reset the connection
+ * and deal with the next request.
+ */
+ if (!evhttp_connected(evcon))
+ evhttp_connection_connect_(evcon);
+ else
+ evhttp_request_dispatch(evcon);
+ } else if (!need_close) {
+ /*
+ * The connection is going to be persistent, but we
+ * need to detect if the other side closes it.
+ */
+ evhttp_connection_start_detectclose(evcon);
+ } else if ((evcon->flags & EVHTTP_CON_AUTOFREE)) {
+ /*
+			 * We have no more requests that need completion and
+			 * we're not waiting for the connection to close, so
+			 * arrange for the connection to be freed below.
+ */
+ free_evcon = 1;
+ }
+ } else {
+ /*
+ * incoming connection - we need to leave the request on the
+ * connection so that we can reply to it.
+ */
+ evcon->state = EVCON_WRITING;
+ }
+
+ /* notify the user of the request */
+ (*req->cb)(req, req->cb_arg);
+
+	/* if this was an outgoing request, we own it and it's done, so free it. */
+ if (con_outgoing) {
+ evhttp_request_free_auto(req);
+ }
+
+	/* If this was the last request of an outgoing connection, we're not
+	 * waiting to receive a connection-close event, and the connection is
+	 * marked for automatic freeing, free it now.  We check that the
+	 * request list is empty one last time, just in case our callback
+	 * added a new request.
+ */
+ if (free_evcon && TAILQ_FIRST(&evcon->requests) == NULL) {
+ evhttp_connection_free(evcon);
+ }
+}
+
+/*
+ * Handles reading from a chunked request.
+ * return ALL_DATA_READ:
+ * all data has been read
+ * return MORE_DATA_EXPECTED:
+ * more data is expected
+ * return DATA_CORRUPTED:
+ * data is corrupted
+ * return REQUEST_CANCELED:
+ * request was canceled by the user calling evhttp_cancel_request
+ * return DATA_TOO_LONG:
+ * ran over the maximum limit
+ */
+
+static enum message_read_status
+evhttp_handle_chunked_read(struct evhttp_request *req, struct evbuffer *buf)
+{
+ if (req == NULL || buf == NULL) {
+ return DATA_CORRUPTED;
+ }
+
+ while (1) {
+ size_t buflen;
+
+ if ((buflen = evbuffer_get_length(buf)) == 0) {
+ break;
+ }
+
+		/* evbuffer_get_length() returns a size_t; reject anything larger
+		 * than EV_SSIZE_MAX so the signed chunk arithmetic below cannot
+		 * overflow */
+ if (buflen > EV_SSIZE_MAX) {
+ return DATA_CORRUPTED;
+ }
+
+ if (req->ntoread < 0) {
+ /* Read chunk size */
+ ev_int64_t ntoread;
+ char *p = evbuffer_readln(buf, NULL, EVBUFFER_EOL_CRLF);
+ char *endp;
+ int error;
+ if (p == NULL)
+ break;
+			/* an empty line here is the CRLF that terminated the
+			 * previous chunk's data; skip it */
+ if (strlen(p) == 0) {
+ mm_free(p);
+ continue;
+ }
+ ntoread = evutil_strtoll(p, &endp, 16);
+ error = (*p == '\0' ||
+ (*endp != '\0' && *endp != ' ') ||
+ ntoread < 0);
+ mm_free(p);
+ if (error) {
+ /* could not get chunk size */
+ return (DATA_CORRUPTED);
+ }
+
+ /* ntoread is signed int64, body_size is unsigned size_t, check for under/overflow conditions */
+ if ((ev_uint64_t)ntoread > EV_SIZE_MAX - req->body_size) {
+ return DATA_CORRUPTED;
+ }
+
+ if (req->body_size + (size_t)ntoread > req->evcon->max_body_size) {
+ /* failed body length test */
+ event_debug(("Request body is too long"));
+ return (DATA_TOO_LONG);
+ }
+
+ req->body_size += (size_t)ntoread;
+ req->ntoread = ntoread;
+ if (req->ntoread == 0) {
+ /* Last chunk */
+ return (ALL_DATA_READ);
+ }
+ continue;
+ }
+
+		/* req->ntoread is a signed 64-bit value, while ssize_t may be
+		 * only 32 bits wide on some architectures; guard against that
+		 * before the comparisons below */
+ if (req->ntoread > EV_SSIZE_MAX) {
+ return DATA_CORRUPTED;
+ }
+
+ /* don't have enough to complete a chunk; wait for more */
+ if (req->ntoread > 0 && buflen < (ev_uint64_t)req->ntoread)
+ return (MORE_DATA_EXPECTED);
+
+ /* Completed chunk */
+ evbuffer_remove_buffer(buf, req->input_buffer, (size_t)req->ntoread);
+ req->ntoread = -1;
+ if (req->chunk_cb != NULL) {
+ req->flags |= EVHTTP_REQ_DEFER_FREE;
+ (*req->chunk_cb)(req, req->cb_arg);
+ evbuffer_drain(req->input_buffer,
+ evbuffer_get_length(req->input_buffer));
+ req->flags &= ~EVHTTP_REQ_DEFER_FREE;
+ if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) {
+ return (REQUEST_CANCELED);
+ }
+ }
+ }
+
+ return (MORE_DATA_EXPECTED);
+}
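+
+/* Worked example (illustrative): given the chunked body
+ *
+ *   "5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n"
+ *
+ * the loop above parses the hex size line "5", moves the next 5 bytes
+ * ("hello") into req->input_buffer, skips the empty line left by the
+ * chunk-terminating CRLF, then does the same for "6" and " world".  The
+ * final "0" size line marks the last chunk, so ALL_DATA_READ is returned
+ * and the caller switches to reading the trailer. */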
+
+static void
+evhttp_read_trailer(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evbuffer *buf = bufferevent_get_input(evcon->bufev);
+
+ switch (evhttp_parse_headers_(req, buf)) {
+ case DATA_CORRUPTED:
+ case DATA_TOO_LONG:
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_DATA_TOO_LONG);
+ break;
+ case ALL_DATA_READ:
+ bufferevent_disable(evcon->bufev, EV_READ);
+ evhttp_connection_done(evcon);
+ break;
+ case MORE_DATA_EXPECTED:
+ case REQUEST_CANCELED: /* ??? */
+ default:
+ break;
+ }
+}
+
+static void
+evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evbuffer *buf = bufferevent_get_input(evcon->bufev);
+
+ if (req->chunked) {
+ switch (evhttp_handle_chunked_read(req, buf)) {
+ case ALL_DATA_READ:
+ /* finished last chunk */
+ evcon->state = EVCON_READING_TRAILER;
+ evhttp_read_trailer(evcon, req);
+ return;
+ case DATA_CORRUPTED:
+ case DATA_TOO_LONG:
+ /* corrupted data */
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_DATA_TOO_LONG);
+ return;
+ case REQUEST_CANCELED:
+ /* request canceled */
+ evhttp_request_free_auto(req);
+ return;
+ case MORE_DATA_EXPECTED:
+ default:
+ break;
+ }
+ } else if (req->ntoread < 0) {
+ /* Read until connection close. */
+ if ((size_t)(req->body_size + evbuffer_get_length(buf)) < req->body_size) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ }
+
+ req->body_size += evbuffer_get_length(buf);
+ evbuffer_add_buffer(req->input_buffer, buf);
+ } else if (req->chunk_cb != NULL || evbuffer_get_length(buf) >= (size_t)req->ntoread) {
+ /* XXX: the above get_length comparison has to be fixed for overflow conditions! */
+ /* We've postponed moving the data until now, but we're
+ * about to use it. */
+ size_t n = evbuffer_get_length(buf);
+
+ if (n > (size_t) req->ntoread)
+ n = (size_t) req->ntoread;
+ req->ntoread -= n;
+ req->body_size += n;
+ evbuffer_remove_buffer(buf, req->input_buffer, n);
+ }
+
+ if (req->body_size > req->evcon->max_body_size ||
+ (!req->chunked && req->ntoread >= 0 &&
+ (size_t)req->ntoread > req->evcon->max_body_size)) {
+		/* XXX: The above cast comparison must be checked for overflow */
+ /* failed body length test */
+ event_debug(("Request body is too long"));
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_DATA_TOO_LONG);
+ return;
+ }
+
+ if (evbuffer_get_length(req->input_buffer) > 0 && req->chunk_cb != NULL) {
+ req->flags |= EVHTTP_REQ_DEFER_FREE;
+ (*req->chunk_cb)(req, req->cb_arg);
+ req->flags &= ~EVHTTP_REQ_DEFER_FREE;
+ evbuffer_drain(req->input_buffer,
+ evbuffer_get_length(req->input_buffer));
+ if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) {
+ evhttp_request_free_auto(req);
+ return;
+ }
+ }
+
+ if (req->ntoread == 0) {
+ bufferevent_disable(evcon->bufev, EV_READ);
+ /* Completed content length */
+ evhttp_connection_done(evcon);
+ return;
+ }
+}
+
+#define get_deferred_queue(evcon) \
+ ((evcon)->base)
+
+/*
+ * Gets called when more data becomes available
+ */
+
+static void
+evhttp_read_cb(struct bufferevent *bufev, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+ /* Cancel if it's pending. */
+ event_deferred_cb_cancel_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+
+ switch (evcon->state) {
+ case EVCON_READING_FIRSTLINE:
+ evhttp_read_firstline(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_HEADERS:
+ evhttp_read_header(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_BODY:
+ evhttp_read_body(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_TRAILER:
+ evhttp_read_trailer(evcon, req);
+ break;
+ case EVCON_IDLE:
+ {
+#ifdef USE_DEBUG
+ struct evbuffer *input;
+ size_t total_len;
+
+ input = bufferevent_get_input(evcon->bufev);
+ total_len = evbuffer_get_length(input);
+ event_debug(("%s: read "EV_SIZE_FMT
+ " bytes in EVCON_IDLE state,"
+ " resetting connection",
+ __func__, EV_SIZE_ARG(total_len)));
+#endif
+
+ evhttp_connection_reset_(evcon);
+ }
+ break;
+ case EVCON_DISCONNECTED:
+ case EVCON_CONNECTING:
+ case EVCON_WRITING:
+ default:
+ event_errx(1, "%s: illegal connection state %d",
+ __func__, evcon->state);
+ }
+}
+
+static void
+evhttp_deferred_read_cb(struct event_callback *cb, void *data)
+{
+ struct evhttp_connection *evcon = data;
+ evhttp_read_cb(evcon->bufev, evcon);
+}
+
+static void
+evhttp_write_connectioncb(struct evhttp_connection *evcon, void *arg)
+{
+ /* This is after writing the request to the server */
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ EVUTIL_ASSERT(req != NULL);
+
+ EVUTIL_ASSERT(evcon->state == EVCON_WRITING);
+
+ /* We need to wait until we've written all of our output data before we can continue */
+ if (evbuffer_get_length(bufferevent_get_output(evcon->bufev)) > 0) { return; }
+
+ /* We are done writing our header and are now expecting the response */
+ req->kind = EVHTTP_RESPONSE;
+
+ evhttp_start_read_(evcon);
+}
+
+/*
+ * Clean up a connection object
+ */
+
+void
+evhttp_connection_free(struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req;
+
+ /* notify interested parties that this connection is going down */
+ if (evcon->fd != -1) {
+ if (evhttp_connected(evcon) && evcon->closecb != NULL)
+ (*evcon->closecb)(evcon, evcon->closecb_arg);
+ }
+
+ /* remove all requests that might be queued on this
+	 * connection. for server connections, this should be empty,
+ * because it gets dequeued either in evhttp_connection_done or
+ * evhttp_connection_fail_.
+ */
+ while ((req = TAILQ_FIRST(&evcon->requests)) != NULL) {
+ evhttp_request_free_(evcon, req);
+ }
+
+ if (evcon->http_server != NULL) {
+ struct evhttp *http = evcon->http_server;
+ TAILQ_REMOVE(&http->connections, evcon, next);
+ }
+
+ if (event_initialized(&evcon->retry_ev)) {
+ event_del(&evcon->retry_ev);
+ event_debug_unassign(&evcon->retry_ev);
+ }
+
+ if (evcon->bufev != NULL)
+ bufferevent_free(evcon->bufev);
+
+ event_deferred_cb_cancel_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+
+ if (evcon->fd != -1) {
+ bufferevent_disable(evcon->bufev, EV_READ|EV_WRITE);
+ shutdown(evcon->fd, EVUTIL_SHUT_WR);
+ if (!(bufferevent_get_options_(evcon->bufev) & BEV_OPT_CLOSE_ON_FREE)) {
+ evutil_closesocket(evcon->fd);
+ }
+ }
+
+ if (evcon->bind_address != NULL)
+ mm_free(evcon->bind_address);
+
+ if (evcon->address != NULL)
+ mm_free(evcon->address);
+
+ mm_free(evcon);
+}
+
+void
+evhttp_connection_free_on_completion(struct evhttp_connection *evcon) {
+ evcon->flags |= EVHTTP_CON_AUTOFREE;
+}
+
+void
+evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+ const char *address)
+{
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ if (evcon->bind_address)
+ mm_free(evcon->bind_address);
+ if ((evcon->bind_address = mm_strdup(address)) == NULL)
+ event_warn("%s: strdup", __func__);
+}
+
+void
+evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+ ev_uint16_t port)
+{
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ evcon->bind_port = port;
+}
+
+static void
+evhttp_request_dispatch(struct evhttp_connection* evcon)
+{
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+	/* this should not usually happen but it's possible */
+ if (req == NULL)
+ return;
+
+ /* delete possible close detection events */
+ evhttp_connection_stop_detectclose(evcon);
+
+ /* we assume that the connection is connected already */
+ EVUTIL_ASSERT(evcon->state == EVCON_IDLE);
+
+ evcon->state = EVCON_WRITING;
+
+ /* Create the header from the store arguments */
+ evhttp_make_header(evcon, req);
+
+ evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL);
+}
+
+/* Reset our connection state: disables reading/writing, closes our fd (if
+ * any), clears out buffers, and puts us in state DISCONNECTED. */
+void
+evhttp_connection_reset_(struct evhttp_connection *evcon)
+{
+ struct evbuffer *tmp;
+
+ /* XXXX This is not actually an optimal fix. Instead we ought to have
+ an API for "stop connecting", or use bufferevent_setfd to turn off
+ connecting. But for Libevent 2.0, this seems like a minimal change
+ least likely to disrupt the rest of the bufferevent and http code.
+
+ Why is this here? If the fd is set in the bufferevent, and the
+ bufferevent is connecting, then you can't actually stop the
+ bufferevent from trying to connect with bufferevent_disable(). The
+ connect will never trigger, since we close the fd, but the timeout
+ might. That caused an assertion failure in evhttp_connection_fail_.
+ */
+ bufferevent_disable_hard_(evcon->bufev, EV_READ|EV_WRITE);
+
+ if (evcon->fd != -1) {
+ /* inform interested parties about connection close */
+ if (evhttp_connected(evcon) && evcon->closecb != NULL)
+ (*evcon->closecb)(evcon, evcon->closecb_arg);
+
+ shutdown(evcon->fd, EVUTIL_SHUT_WR);
+ evutil_closesocket(evcon->fd);
+ bufferevent_setfd(evcon->bufev, -1);
+ evcon->fd = -1;
+ }
+
+ /* we need to clean up any buffered data */
+ tmp = bufferevent_get_output(evcon->bufev);
+ evbuffer_drain(tmp, evbuffer_get_length(tmp));
+ tmp = bufferevent_get_input(evcon->bufev);
+ evbuffer_drain(tmp, evbuffer_get_length(tmp));
+
+ evcon->state = EVCON_DISCONNECTED;
+}
+
+static void
+evhttp_connection_start_detectclose(struct evhttp_connection *evcon)
+{
+ evcon->flags |= EVHTTP_CON_CLOSEDETECT;
+
+ bufferevent_enable(evcon->bufev, EV_READ);
+}
+
+static void
+evhttp_connection_stop_detectclose(struct evhttp_connection *evcon)
+{
+ evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+
+ bufferevent_disable(evcon->bufev, EV_READ);
+}
+
+static void
+evhttp_connection_retry(evutil_socket_t fd, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+
+ evcon->state = EVCON_DISCONNECTED;
+ evhttp_connection_connect_(evcon);
+}
+
+static void
+evhttp_connection_cb_cleanup(struct evhttp_connection *evcon)
+{
+ struct evcon_requestq requests;
+
+ evhttp_connection_reset_(evcon);
+ if (evcon->retry_max < 0 || evcon->retry_cnt < evcon->retry_max) {
+ struct timeval tv_retry = evcon->initial_retry_timeout;
+ int i;
+ evtimer_assign(&evcon->retry_ev, evcon->base, evhttp_connection_retry, evcon);
+ /* XXXX handle failure from evhttp_add_event */
+ for (i=0; i < evcon->retry_cnt; ++i) {
+ tv_retry.tv_usec *= 2;
+ if (tv_retry.tv_usec > 1000000) {
+ tv_retry.tv_usec -= 1000000;
+ tv_retry.tv_sec += 1;
+ }
+ tv_retry.tv_sec *= 2;
+ if (tv_retry.tv_sec > 3600) {
+ tv_retry.tv_sec = 3600;
+ tv_retry.tv_usec = 0;
+ }
+ }
+ event_add(&evcon->retry_ev, &tv_retry);
+ evcon->retry_cnt++;
+ return;
+ }
+
+ /*
+ * User callback can do evhttp_make_request() on the same
+ * evcon so new request will be added to evcon->requests. To
+ * avoid freeing it prematurely we iterate over the copy of
+ * the queue.
+ */
+ TAILQ_INIT(&requests);
+ while (TAILQ_FIRST(&evcon->requests) != NULL) {
+ struct evhttp_request *request = TAILQ_FIRST(&evcon->requests);
+ TAILQ_REMOVE(&evcon->requests, request, next);
+ TAILQ_INSERT_TAIL(&requests, request, next);
+ }
+
+ /* for now, we just signal all requests by executing their callbacks */
+ while (TAILQ_FIRST(&requests) != NULL) {
+ struct evhttp_request *request = TAILQ_FIRST(&requests);
+ TAILQ_REMOVE(&requests, request, next);
+ request->evcon = NULL;
+
+ /* we might want to set an error here */
+ request->cb(request, request->cb_arg);
+ evhttp_request_free_auto(request);
+ }
+}
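+
+/* Worked example (illustrative): with initial_retry_timeout = {2, 0} and
+ * retry_max = 5, the doubling loop above schedules reconnect attempts
+ * after 2s, 4s, 8s, 16s and 32s; if retries kept going, the delay would
+ * be capped at 3600 seconds. */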
+
+static void
+evhttp_error_cb(struct bufferevent *bufev, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+ if (evcon->fd == -1)
+ evcon->fd = bufferevent_getfd(bufev);
+
+ switch (evcon->state) {
+ case EVCON_CONNECTING:
+ if (what & BEV_EVENT_TIMEOUT) {
+ event_debug(("%s: connection timeout for \"%s:%d\" on "
+ EV_SOCK_FMT,
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+ evhttp_connection_cb_cleanup(evcon);
+ return;
+ }
+ break;
+
+ case EVCON_READING_BODY:
+ if (!req->chunked && req->ntoread < 0
+ && what == (BEV_EVENT_READING|BEV_EVENT_EOF)) {
+ /* EOF on read can be benign */
+ evhttp_connection_done(evcon);
+ return;
+ }
+ break;
+
+ case EVCON_DISCONNECTED:
+ case EVCON_IDLE:
+ case EVCON_READING_FIRSTLINE:
+ case EVCON_READING_HEADERS:
+ case EVCON_READING_TRAILER:
+ case EVCON_WRITING:
+ default:
+ break;
+ }
+
+ /* when we are in close detect mode, a read error means that
+ * the other side closed their connection.
+ */
+ if (evcon->flags & EVHTTP_CON_CLOSEDETECT) {
+ evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+ EVUTIL_ASSERT(evcon->http_server == NULL);
+ /* For connections from the client, we just
+ * reset the connection so that it becomes
+ * disconnected.
+ */
+ EVUTIL_ASSERT(evcon->state == EVCON_IDLE);
+ evhttp_connection_reset_(evcon);
+
+ /*
+		 * If we have no more requests that need completion and the
+		 * connection is marked for auto-free, free it now that all
+		 * requests have been completed.
+ */
+ if (TAILQ_FIRST(&evcon->requests) == NULL
+ && (evcon->flags & EVHTTP_CON_OUTGOING)
+ && (evcon->flags & EVHTTP_CON_AUTOFREE)) {
+ evhttp_connection_free(evcon);
+ }
+ return;
+ }
+
+ if (what & BEV_EVENT_TIMEOUT) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT);
+ } else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF);
+ } else if (what == BEV_EVENT_CONNECTED) {
+ } else {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_BUFFER_ERROR);
+ }
+}
+
+/*
+ * Event callback for asynchronous connection attempt.
+ */
+static void
+evhttp_connection_cb(struct bufferevent *bufev, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ int error;
+ ev_socklen_t errsz = sizeof(error);
+
+ if (evcon->fd == -1)
+ evcon->fd = bufferevent_getfd(bufev);
+
+ if (!(what & BEV_EVENT_CONNECTED)) {
+ /* some operating systems return ECONNREFUSED immediately
+ * when connecting to a local address. the cleanup is going
+ * to reschedule this function call.
+ */
+#ifndef _WIN32
+ if (errno == ECONNREFUSED)
+ goto cleanup;
+#endif
+ evhttp_error_cb(bufev, what, arg);
+ return;
+ }
+
+ if (evcon->fd == -1) {
+ event_debug(("%s: bufferevent_getfd returned -1",
+ __func__));
+ goto cleanup;
+ }
+
+ /* Check if the connection completed */
+ if (getsockopt(evcon->fd, SOL_SOCKET, SO_ERROR, (void*)&error,
+ &errsz) == -1) {
+ event_debug(("%s: getsockopt for \"%s:%d\" on "EV_SOCK_FMT,
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+ goto cleanup;
+ }
+
+ if (error) {
+ event_debug(("%s: connect failed for \"%s:%d\" on "
+ EV_SOCK_FMT": %s",
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd),
+ evutil_socket_error_to_string(error)));
+ goto cleanup;
+ }
+
+ /* We are connected to the server now */
+ event_debug(("%s: connected to \"%s:%d\" on "EV_SOCK_FMT"\n",
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+
+ /* Reset the retry count as we were successful in connecting */
+ evcon->retry_cnt = 0;
+ evcon->state = EVCON_IDLE;
+
+ /* reset the bufferevent cbs */
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ if (!evutil_timerisset(&evcon->timeout)) {
+ const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 };
+ const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 };
+ bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv);
+ } else {
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ }
+
+ /* try to start requests that have queued up on this connection */
+ evhttp_request_dispatch(evcon);
+ return;
+
+ cleanup:
+ evhttp_connection_cb_cleanup(evcon);
+}
+
+/*
+ * Check if we got a valid response code.
+ */
+
+static int
+evhttp_valid_response_code(int code)
+{
+ if (code == 0)
+ return (0);
+
+ return (1);
+}
+
+static int
+evhttp_parse_http_version(const char *version, struct evhttp_request *req)
+{
+ int major, minor;
+ char ch;
+ int n = sscanf(version, "HTTP/%d.%d%c", &major, &minor, &ch);
+ if (n != 2 || major > 1) {
+ event_debug(("%s: bad version %s on message %p from %s",
+ __func__, version, req, req->remote_host));
+ return (-1);
+ }
+ req->major = major;
+ req->minor = minor;
+ return (0);
+}
+
+/* Parses the status line of a web server */
+
+static int
+evhttp_parse_response_line(struct evhttp_request *req, char *line)
+{
+ char *protocol;
+ char *number;
+ const char *readable = "";
+
+ protocol = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ number = strsep(&line, " ");
+ if (line != NULL)
+ readable = line;
+
+ if (evhttp_parse_http_version(protocol, req) < 0)
+ return (-1);
+
+ req->response_code = atoi(number);
+ if (!evhttp_valid_response_code(req->response_code)) {
+ event_debug(("%s: bad response code \"%s\"",
+ __func__, number));
+ return (-1);
+ }
+
+ if ((req->response_code_line = mm_strdup(readable)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/* Parse the first line of an HTTP request */
+
+static int
+evhttp_parse_request_line(struct evhttp_request *req, char *line)
+{
+ char *method;
+ char *uri;
+ char *version;
+ const char *hostname;
+ const char *scheme;
+ size_t method_len;
+ enum evhttp_cmd_type type;
+
+ /* Parse the request line */
+ method = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ uri = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ version = strsep(&line, " ");
+ if (line != NULL)
+ return (-1);
+
+ method_len = (uri - method) - 1;
+ type = EVHTTP_REQ_UNKNOWN_;
+
+ /* First line */
+ switch (method_len) {
+ case 3:
+ /* The length of the method string is 3, meaning it can only be one of two methods: GET or PUT */
+
+ /* Since both GET and PUT share the same character 'T' at the end,
+ * if the string doesn't have 'T', we can immediately determine this
+ * is an invalid HTTP method */
+
+ if (method[2] != 'T') {
+ break;
+ }
+
+ switch (*method) {
+ case 'G':
+ /* This first byte is 'G', so make sure the next byte is
+ * 'E', if it isn't then this isn't a valid method */
+
+ if (method[1] == 'E') {
+ type = EVHTTP_REQ_GET;
+ }
+
+ break;
+ case 'P':
+ /* First byte is P, check second byte for 'U', if not,
+ * we know it's an invalid method */
+ if (method[1] == 'U') {
+ type = EVHTTP_REQ_PUT;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ /* The method length is 4 bytes, leaving only the methods "POST" and "HEAD" */
+ switch (*method) {
+ case 'P':
+ if (method[3] == 'T' && method[2] == 'S' && method[1] == 'O') {
+ type = EVHTTP_REQ_POST;
+ }
+ break;
+ case 'H':
+ if (method[3] == 'D' && method[2] == 'A' && method[1] == 'E') {
+ type = EVHTTP_REQ_HEAD;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ /* Method length is 5 bytes, which can only encompass PATCH and TRACE */
+ switch (*method) {
+ case 'P':
+ if (method[4] == 'H' && method[3] == 'C' && method[2] == 'T' && method[1] == 'A') {
+ type = EVHTTP_REQ_PATCH;
+ }
+ break;
+ case 'T':
+ if (method[4] == 'E' && method[3] == 'C' && method[2] == 'A' && method[1] == 'R') {
+ type = EVHTTP_REQ_TRACE;
+ }
+
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+		/* Method length is 6; the only valid method 6 bytes in length is DELETE */
+
+ /* If the first byte isn't 'D' then it's invalid */
+ if (*method != 'D') {
+ break;
+ }
+
+ if (method[5] == 'E' && method[4] == 'T' && method[3] == 'E' && method[2] == 'L' && method[1] == 'E') {
+ type = EVHTTP_REQ_DELETE;
+ }
+
+ break;
+ case 7:
+ /* A 7-byte method can only be OPTIONS or CONNECT */
+ switch (*method) {
+ case 'O':
+ if (method[6] == 'S' && method[5] == 'N' && method[4] == 'O' &&
+ method[3] == 'I' && method[2] == 'T' && method[1] == 'P') {
+ type = EVHTTP_REQ_OPTIONS;
+ }
+
+ break;
+ case 'C':
+ if (method[6] == 'T' && method[5] == 'C' && method[4] == 'E' &&
+ method[3] == 'N' && method[2] == 'N' && method[1] == 'O') {
+ type = EVHTTP_REQ_CONNECT;
+ }
+
+ break;
+ default:
+ break;
+ }
+ break;
+ } /* switch */
+
+ if ((int)type == EVHTTP_REQ_UNKNOWN_) {
+ event_debug(("%s: bad method %s on request %p from %s",
+ __func__, method, req, req->remote_host));
+ /* No error yet; we'll give a better error later when
+ * we see that req->type is unsupported. */
+ }
+
+ req->type = type;
+
+ if (evhttp_parse_http_version(version, req) < 0)
+ return (-1);
+
+ if ((req->uri = mm_strdup(uri)) == NULL) {
+ event_debug(("%s: mm_strdup", __func__));
+ return (-1);
+ }
+
+ if ((req->uri_elems = evhttp_uri_parse_with_flags(req->uri,
+ EVHTTP_URI_NONCONFORMANT)) == NULL) {
+ return -1;
+ }
+
+ /* If we have an absolute-URI, check to see if it is an http request
+ for a known vhost or server alias. If we don't know about this
+ host, we consider it a proxy request. */
+ scheme = evhttp_uri_get_scheme(req->uri_elems);
+ hostname = evhttp_uri_get_host(req->uri_elems);
+ if (scheme && (!evutil_ascii_strcasecmp(scheme, "http") ||
+ !evutil_ascii_strcasecmp(scheme, "https")) &&
+ hostname &&
+ !evhttp_find_vhost(req->evcon->http_server, NULL, hostname))
+ req->flags |= EVHTTP_PROXY_REQUEST;
+
+ return (0);
+}
+
+const char *
+evhttp_find_header(const struct evkeyvalq *headers, const char *key)
+{
+ struct evkeyval *header;
+
+ TAILQ_FOREACH(header, headers, next) {
+ if (evutil_ascii_strcasecmp(header->key, key) == 0)
+ return (header->value);
+ }
+
+ return (NULL);
+}
+
+void
+evhttp_clear_headers(struct evkeyvalq *headers)
+{
+ struct evkeyval *header;
+
+ for (header = TAILQ_FIRST(headers);
+ header != NULL;
+ header = TAILQ_FIRST(headers)) {
+ TAILQ_REMOVE(headers, header, next);
+ mm_free(header->key);
+ mm_free(header->value);
+ mm_free(header);
+ }
+}
+
+/*
+ * Returns 0 if the header was successfully removed.
+ * Returns -1 if the header could not be found.
+ */
+
+int
+evhttp_remove_header(struct evkeyvalq *headers, const char *key)
+{
+ struct evkeyval *header;
+
+ TAILQ_FOREACH(header, headers, next) {
+ if (evutil_ascii_strcasecmp(header->key, key) == 0)
+ break;
+ }
+
+ if (header == NULL)
+ return (-1);
+
+ /* Free and remove the header that we found */
+ TAILQ_REMOVE(headers, header, next);
+ mm_free(header->key);
+ mm_free(header->value);
+ mm_free(header);
+
+ return (0);
+}
+
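+/*
+ * A header value is accepted only if every embedded CR/LF sequence is
+ * followed by a space or tab, i.e. an obsolete folded-header
+ * continuation; a bare CR/LF would otherwise allow a value to inject
+ * additional header lines into the output.
+ */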
+static int
+evhttp_header_is_valid_value(const char *value)
+{
+ const char *p = value;
+
+ while ((p = strpbrk(p, "\r\n")) != NULL) {
+ /* we really expect only one new line */
+ p += strspn(p, "\r\n");
+ /* we expect a space or tab for continuation */
+ if (*p != ' ' && *p != '\t')
+ return (0);
+ }
+ return (1);
+}
+
+int
+evhttp_add_header(struct evkeyvalq *headers,
+ const char *key, const char *value)
+{
+ event_debug(("%s: key: %s val: %s\n", __func__, key, value));
+
+ if (strchr(key, '\r') != NULL || strchr(key, '\n') != NULL) {
+ /* drop illegal headers */
+ event_debug(("%s: dropping illegal header key\n", __func__));
+ return (-1);
+ }
+
+ if (!evhttp_header_is_valid_value(value)) {
+ event_debug(("%s: dropping illegal header value\n", __func__));
+ return (-1);
+ }
+
+ return (evhttp_add_header_internal(headers, key, value));
+}
+
+static int
+evhttp_add_header_internal(struct evkeyvalq *headers,
+ const char *key, const char *value)
+{
+ struct evkeyval *header = mm_calloc(1, sizeof(struct evkeyval));
+ if (header == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (-1);
+ }
+ if ((header->key = mm_strdup(key)) == NULL) {
+ mm_free(header);
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+ if ((header->value = mm_strdup(value)) == NULL) {
+ mm_free(header->key);
+ mm_free(header);
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+
+ TAILQ_INSERT_TAIL(headers, header, next);
+
+ return (0);
+}
+
+/*
+ * Parses header lines from a request or a response into the specified
+ * request object, given an event buffer.
+ *
+ * Returns
+ * DATA_CORRUPTED when a line cannot be parsed
+ * DATA_TOO_LONG when the data exceeds the connection's size limit
+ * MORE_DATA_EXPECTED when we need to read more headers
+ * ALL_DATA_READ when all headers have been read.
+ */
+
+enum message_read_status
+evhttp_parse_firstline_(struct evhttp_request *req, struct evbuffer *buffer)
+{
+ char *line;
+ enum message_read_status status = ALL_DATA_READ;
+
+ size_t line_length;
+ /* XXX try */
+ line = evbuffer_readln(buffer, &line_length, EVBUFFER_EOL_CRLF);
+ if (line == NULL) {
+ if (req->evcon != NULL &&
+ evbuffer_get_length(buffer) > req->evcon->max_headers_size)
+ return (DATA_TOO_LONG);
+ else
+ return (MORE_DATA_EXPECTED);
+ }
+
+ if (req->evcon != NULL &&
+ line_length > req->evcon->max_headers_size) {
+ mm_free(line);
+ return (DATA_TOO_LONG);
+ }
+
+ req->headers_size = line_length;
+
+ switch (req->kind) {
+ case EVHTTP_REQUEST:
+ if (evhttp_parse_request_line(req, line) == -1)
+ status = DATA_CORRUPTED;
+ break;
+ case EVHTTP_RESPONSE:
+ if (evhttp_parse_response_line(req, line) == -1)
+ status = DATA_CORRUPTED;
+ break;
+ default:
+ status = DATA_CORRUPTED;
+ }
+
+ mm_free(line);
+ return (status);
+}
+
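+/*
+ * Appends a folded continuation line to the most recently added header,
+ * joining the existing value and the trimmed line with a single space.
+ */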
+static int
+evhttp_append_to_last_header(struct evkeyvalq *headers, char *line)
+{
+ struct evkeyval *header = TAILQ_LAST(headers, evkeyvalq);
+ char *newval;
+ size_t old_len, line_len;
+
+ if (header == NULL)
+ return (-1);
+
+ old_len = strlen(header->value);
+
+ /* Strip spaces and tabs from the start and end of the line. */
+ while (*line == ' ' || *line == '\t')
+ ++line;
+ evutil_rtrim_lws_(line);
+
+ line_len = strlen(line);
+
+ newval = mm_realloc(header->value, old_len + line_len + 2);
+ if (newval == NULL)
+ return (-1);
+
+ newval[old_len] = ' ';
+ memcpy(newval + old_len + 1, line, line_len + 1);
+ header->value = newval;
+
+ return (0);
+}
+
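+/*
+ * Reads header lines from the buffer until the empty line that ends the
+ * header block.  Lines starting with a space or tab are treated as
+ * continuations of the previous header; all other lines are split into
+ * a key and a value at the first ':'.
+ */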
+enum message_read_status
+evhttp_parse_headers_(struct evhttp_request *req, struct evbuffer* buffer)
+{
+ enum message_read_status errcode = DATA_CORRUPTED;
+ char *line;
+ enum message_read_status status = MORE_DATA_EXPECTED;
+
+ struct evkeyvalq* headers = req->input_headers;
+ size_t line_length;
+ while ((line = evbuffer_readln(buffer, &line_length, EVBUFFER_EOL_CRLF))
+ != NULL) {
+ char *skey, *svalue;
+
+ req->headers_size += line_length;
+
+ if (req->evcon != NULL &&
+ req->headers_size > req->evcon->max_headers_size) {
+ errcode = DATA_TOO_LONG;
+ goto error;
+ }
+
+ if (*line == '\0') { /* Last header - Done */
+ status = ALL_DATA_READ;
+ mm_free(line);
+ break;
+ }
+
+ /* Check if this is a continuation line */
+ if (*line == ' ' || *line == '\t') {
+ if (evhttp_append_to_last_header(headers, line) == -1)
+ goto error;
+ mm_free(line);
+ continue;
+ }
+
+ /* Processing of header lines */
+ svalue = line;
+ skey = strsep(&svalue, ":");
+ if (svalue == NULL)
+ goto error;
+
+ svalue += strspn(svalue, " ");
+ evutil_rtrim_lws_(svalue);
+
+ if (evhttp_add_header(headers, skey, svalue) == -1)
+ goto error;
+
+ mm_free(line);
+ }
+
+ if (status == MORE_DATA_EXPECTED) {
+ if (req->evcon != NULL &&
+ req->headers_size + evbuffer_get_length(buffer) > req->evcon->max_headers_size)
+ return (DATA_TOO_LONG);
+ }
+
+ return (status);
+
+ error:
+ mm_free(line);
+ return (errcode);
+}
+
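+/*
+ * Determines how many body bytes to expect from the Content-Length and
+ * Connection headers.  ntoread is set to -1 when the body extends until
+ * the connection is closed.
+ */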
+static int
+evhttp_get_body_length(struct evhttp_request *req)
+{
+ struct evkeyvalq *headers = req->input_headers;
+ const char *content_length;
+ const char *connection;
+
+ content_length = evhttp_find_header(headers, "Content-Length");
+ connection = evhttp_find_header(headers, "Connection");
+
+ if (content_length == NULL && connection == NULL)
+ req->ntoread = -1;
+ else if (content_length == NULL &&
+ evutil_ascii_strcasecmp(connection, "Close") != 0) {
+ /* Bad combination, we don't know when it will end */
+ event_warnx("%s: we got no content length, but the "
+ "server wants to keep the connection open: %s.",
+ __func__, connection);
+ return (-1);
+ } else if (content_length == NULL) {
+ req->ntoread = -1;
+ } else {
+ char *endp;
+ ev_int64_t ntoread = evutil_strtoll(content_length, &endp, 10);
+ if (*content_length == '\0' || *endp != '\0' || ntoread < 0) {
+ event_debug(("%s: illegal content length: %s",
+ __func__, content_length));
+ return (-1);
+ }
+ req->ntoread = ntoread;
+ }
+
+ event_debug(("%s: bytes to read: "EV_I64_FMT" (in buffer "EV_SIZE_FMT")\n",
+ __func__, EV_I64_ARG(req->ntoread),
+ EV_SIZE_ARG(evbuffer_get_length(bufferevent_get_input(req->evcon->bufev)))));
+
+ return (0);
+}
+
+static int
+evhttp_method_may_have_body(enum evhttp_cmd_type type)
+{
+ switch (type) {
+ case EVHTTP_REQ_POST:
+ case EVHTTP_REQ_PUT:
+ case EVHTTP_REQ_PATCH:
+ return 1;
+ case EVHTTP_REQ_TRACE:
+ return 0;
+ /* XXX May any of the below methods have a body? */
+ case EVHTTP_REQ_GET:
+ case EVHTTP_REQ_HEAD:
+ case EVHTTP_REQ_DELETE:
+ case EVHTTP_REQ_OPTIONS:
+ case EVHTTP_REQ_CONNECT:
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void
+evhttp_get_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ const char *xfer_enc;
+
+ /* If this is a request without a body, then we are done */
+ if (req->kind == EVHTTP_REQUEST &&
+ !evhttp_method_may_have_body(req->type)) {
+ evhttp_connection_done(evcon);
+ return;
+ }
+ evcon->state = EVCON_READING_BODY;
+ xfer_enc = evhttp_find_header(req->input_headers, "Transfer-Encoding");
+ if (xfer_enc != NULL && evutil_ascii_strcasecmp(xfer_enc, "chunked") == 0) {
+ req->chunked = 1;
+ req->ntoread = -1;
+ } else {
+ if (evhttp_get_body_length(req) == -1) {
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_INVALID_HEADER);
+ return;
+ }
+ if (req->kind == EVHTTP_REQUEST && req->ntoread < 1) {
+ /* An incoming request with no content-length and no
+ * transfer-encoding has no body. */
+ evhttp_connection_done(evcon);
+ return;
+ }
+ }
+
+ /* Should we send a 100 Continue status line? */
+ if (req->kind == EVHTTP_REQUEST && REQ_VERSION_ATLEAST(req, 1, 1)) {
+ const char *expect;
+
+ expect = evhttp_find_header(req->input_headers, "Expect");
+ if (expect) {
+ if (!evutil_ascii_strcasecmp(expect, "100-continue")) {
+ /* XXX It would be nice to do some sanity
+ checking here. Does the resource exist?
+ Should the resource accept post requests? If
+ no, we should respond with an error. For
+ now, just optimistically tell the client to
+ send their message body. */
+ if (req->ntoread > 0) {
+ /* ntoread is ev_int64_t, max_body_size is ev_uint64_t */
+ if ((req->evcon->max_body_size <= EV_INT64_MAX) && (ev_uint64_t)req->ntoread > req->evcon->max_body_size) {
+ evhttp_send_error(req, HTTP_ENTITYTOOLARGE, NULL);
+ return;
+ }
+ }
+ if (!evbuffer_get_length(bufferevent_get_input(evcon->bufev)))
+ evhttp_send_continue(evcon, req);
+ } else {
+ evhttp_send_error(req, HTTP_EXPECTATIONFAILED,
+ NULL);
+ return;
+ }
+ }
+ }
+
+ evhttp_read_body(evcon, req);
+ /* note the request may have been freed in evhttp_read_body */
+}
+
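+/*
+ * Parses the request or status line from the input buffer; once it is
+ * complete, the connection moves on to reading the headers.
+ */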
+static void
+evhttp_read_firstline(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ enum message_read_status res;
+
+ res = evhttp_parse_firstline_(req, bufferevent_get_input(evcon->bufev));
+ if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) {
+ /* Error while reading, terminate */
+ event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(evcon->fd)));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ } else if (res == MORE_DATA_EXPECTED) {
+ /* Need more header lines */
+ return;
+ }
+
+ evcon->state = EVCON_READING_HEADERS;
+ evhttp_read_header(evcon, req);
+}
+
+static void
+evhttp_read_header(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ enum message_read_status res;
+ evutil_socket_t fd = evcon->fd;
+
+ res = evhttp_parse_headers_(req, bufferevent_get_input(evcon->bufev));
+ if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) {
+ /* Error while reading, terminate */
+ event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(fd)));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ } else if (res == MORE_DATA_EXPECTED) {
+ /* Need more header lines */
+ return;
+ }
+
+ /* Callback can shut down connection with negative return value */
+ if (req->header_cb != NULL) {
+ if ((*req->header_cb)(req, req->cb_arg) < 0) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF);
+ return;
+ }
+ }
+
+ /* Done reading headers, do the real work */
+ switch (req->kind) {
+ case EVHTTP_REQUEST:
+ event_debug(("%s: checking for post data on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(fd)));
+ evhttp_get_body(evcon, req);
+ /* note the request may have been freed in evhttp_get_body */
+ break;
+
+ case EVHTTP_RESPONSE:
+ /* Start over if we got a 100 Continue response. */
+ if (req->response_code == 100) {
+ evhttp_start_read_(evcon);
+ return;
+ }
+ if (!evhttp_response_needs_body(req)) {
+ event_debug(("%s: skipping body for code %d\n",
+ __func__, req->response_code));
+ evhttp_connection_done(evcon);
+ } else {
+ event_debug(("%s: start of read body for %s on "
+ EV_SOCK_FMT"\n",
+ __func__, req->remote_host, EV_SOCK_ARG(fd)));
+ evhttp_get_body(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_get_body */
+ }
+ break;
+
+ default:
+ event_warnx("%s: bad header on "EV_SOCK_FMT, __func__,
+ EV_SOCK_ARG(fd));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ break;
+ }
+ /* request may have been freed above */
+}
+
+/*
+ * Creates a TCP connection to the specified port and executes a callback
+ * when finished. Failure or success is indicated by the passed connection
+ * object.
+ *
+ * Although this interface accepts a hostname, it is intended to take
+ * only numeric hostnames so that non-blocking DNS resolution can
+ * happen elsewhere.
+ */
+
+struct evhttp_connection *
+evhttp_connection_new(const char *address, unsigned short port)
+{
+ return (evhttp_connection_base_new(NULL, NULL, address, port));
+}
+
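+/*
+ * Creates a connection object for the given address and port.  If bev is
+ * NULL, a plain socket bufferevent is created on the supplied event base.
+ * Header and body size limits default to unlimited (EV_SIZE_MAX).
+ */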
+struct evhttp_connection *
+evhttp_connection_base_bufferevent_new(struct event_base *base, struct evdns_base *dnsbase, struct bufferevent* bev,
+ const char *address, unsigned short port)
+{
+ struct evhttp_connection *evcon = NULL;
+
+ event_debug(("Attempting connection to %s:%d\n", address, port));
+
+ if ((evcon = mm_calloc(1, sizeof(struct evhttp_connection))) == NULL) {
+ event_warn("%s: calloc failed", __func__);
+ goto error;
+ }
+
+ evcon->fd = -1;
+ evcon->port = port;
+
+ evcon->max_headers_size = EV_SIZE_MAX;
+ evcon->max_body_size = EV_SIZE_MAX;
+
+ evutil_timerclear(&evcon->timeout);
+ evcon->retry_cnt = evcon->retry_max = 0;
+
+ if ((evcon->address = mm_strdup(address)) == NULL) {
+ event_warn("%s: strdup failed", __func__);
+ goto error;
+ }
+
+ if (bev == NULL) {
+ if (!(bev = bufferevent_socket_new(base, -1, 0))) {
+ event_warn("%s: bufferevent_socket_new failed", __func__);
+ goto error;
+ }
+ }
+
+ bufferevent_setcb(bev, evhttp_read_cb, evhttp_write_cb, evhttp_error_cb, evcon);
+ evcon->bufev = bev;
+
+ evcon->state = EVCON_DISCONNECTED;
+ TAILQ_INIT(&evcon->requests);
+
+ evcon->initial_retry_timeout.tv_sec = 2;
+ evcon->initial_retry_timeout.tv_usec = 0;
+
+ if (base != NULL) {
+ evcon->base = base;
+ if (bufferevent_get_base(bev) != base)
+ bufferevent_base_set(base, evcon->bufev);
+ }
+
+ event_deferred_cb_init_(
+ &evcon->read_more_deferred_cb,
+ bufferevent_get_priority(bev),
+ evhttp_deferred_read_cb, evcon);
+
+ evcon->dns_base = dnsbase;
+ evcon->ai_family = AF_UNSPEC;
+
+ return (evcon);
+
+ error:
+ if (evcon != NULL)
+ evhttp_connection_free(evcon);
+ return (NULL);
+}
+
+struct bufferevent* evhttp_connection_get_bufferevent(struct evhttp_connection *evcon)
+{
+ return evcon->bufev;
+}
+
+struct evhttp *
+evhttp_connection_get_server(struct evhttp_connection *evcon)
+{
+ return evcon->http_server;
+}
+
+struct evhttp_connection *
+evhttp_connection_base_new(struct event_base *base, struct evdns_base *dnsbase,
+ const char *address, unsigned short port)
+{
+ return evhttp_connection_base_bufferevent_new(base, dnsbase, NULL, address, port);
+}
+
+void evhttp_connection_set_family(struct evhttp_connection *evcon,
+ int family)
+{
+ evcon->ai_family = family;
+}
+
+int evhttp_connection_set_flags(struct evhttp_connection *evcon,
+ int flags)
+{
+ int avail_flags = 0;
+ avail_flags |= EVHTTP_CON_REUSE_CONNECTED_ADDR;
+
+ if (flags & ~avail_flags || flags > EVHTTP_CON_PUBLIC_FLAGS_END)
+ return 1;
+ evcon->flags &= ~avail_flags;
+
+ evcon->flags |= flags;
+
+ return 0;
+}
+
+void
+evhttp_connection_set_base(struct evhttp_connection *evcon,
+ struct event_base *base)
+{
+ EVUTIL_ASSERT(evcon->base == NULL);
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ evcon->base = base;
+ bufferevent_base_set(base, evcon->bufev);
+}
+
+void
+evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+ int timeout_in_secs)
+{
+ if (timeout_in_secs == -1)
+ evhttp_connection_set_timeout_tv(evcon, NULL);
+ else {
+ struct timeval tv;
+ tv.tv_sec = timeout_in_secs;
+ tv.tv_usec = 0;
+ evhttp_connection_set_timeout_tv(evcon, &tv);
+ }
+}
+
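+/* Passing a NULL tv restores the default read/write timeouts
+ * (HTTP_READ_TIMEOUT and HTTP_WRITE_TIMEOUT). */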
+void
+evhttp_connection_set_timeout_tv(struct evhttp_connection *evcon,
+ const struct timeval* tv)
+{
+ if (tv) {
+ evcon->timeout = *tv;
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ } else {
+ const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 };
+ const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 };
+ evutil_timerclear(&evcon->timeout);
+ bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv);
+ }
+}
+
+void
+evhttp_connection_set_initial_retry_tv(struct evhttp_connection *evcon,
+ const struct timeval *tv)
+{
+ if (tv) {
+ evcon->initial_retry_timeout = *tv;
+ } else {
+ evutil_timerclear(&evcon->initial_retry_timeout);
+ evcon->initial_retry_timeout.tv_sec = 2;
+ }
+}
+
+void
+evhttp_connection_set_retries(struct evhttp_connection *evcon,
+ int retry_max)
+{
+ evcon->retry_max = retry_max;
+}
+
+void
+evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+ void (*cb)(struct evhttp_connection *, void *), void *cbarg)
+{
+ evcon->closecb = cb;
+ evcon->closecb_arg = cbarg;
+}
+
+void
+evhttp_connection_get_peer(struct evhttp_connection *evcon,
+ char **address, ev_uint16_t *port)
+{
+ *address = evcon->address;
+ *port = evcon->port;
+}
+
+const struct sockaddr*
+evhttp_connection_get_addr(struct evhttp_connection *evcon)
+{
+ return bufferevent_socket_get_conn_address_(evcon->bufev);
+}
+
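+/*
+ * (Re)establishes the outgoing connection.  If EVHTTP_CON_REUSE_CONNECTED_ADDR
+ * is set and a previously resolved address is available, it is reused
+ * directly; otherwise the hostname is resolved (through dns_base, if set)
+ * and connected asynchronously.
+ */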
+int
+evhttp_connection_connect_(struct evhttp_connection *evcon)
+{
+ int old_state = evcon->state;
+ const char *address = evcon->address;
+ const struct sockaddr *sa = evhttp_connection_get_addr(evcon);
+ int ret;
+
+ if (evcon->state == EVCON_CONNECTING)
+ return (0);
+
+ evhttp_connection_reset_(evcon);
+
+ EVUTIL_ASSERT(!(evcon->flags & EVHTTP_CON_INCOMING));
+ evcon->flags |= EVHTTP_CON_OUTGOING;
+
+ if (evcon->bind_address || evcon->bind_port) {
+ evcon->fd = bind_socket(
+ evcon->bind_address, evcon->bind_port, 0 /*reuse*/);
+ if (evcon->fd == -1) {
+ event_debug(("%s: failed to bind to \"%s\"",
+ __func__, evcon->bind_address));
+ return (-1);
+ }
+
+ bufferevent_setfd(evcon->bufev, evcon->fd);
+ } else {
+ bufferevent_setfd(evcon->bufev, -1);
+ }
+
+ /* Set up a callback for successful connection setup */
+ bufferevent_setcb(evcon->bufev,
+ NULL /* evhttp_read_cb */,
+ NULL /* evhttp_write_cb */,
+ evhttp_connection_cb,
+ evcon);
+ if (!evutil_timerisset(&evcon->timeout)) {
+ const struct timeval conn_tv = { HTTP_CONNECT_TIMEOUT, 0 };
+ bufferevent_set_timeouts(evcon->bufev, &conn_tv, &conn_tv);
+ } else {
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ }
+ /* make sure that we get a write callback */
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+
+ evcon->state = EVCON_CONNECTING;
+
+ if (evcon->flags & EVHTTP_CON_REUSE_CONNECTED_ADDR &&
+ sa &&
+ (sa->sa_family == AF_INET || sa->sa_family == AF_INET6)) {
+ int socklen = sizeof(struct sockaddr_in);
+ if (sa->sa_family == AF_INET6) {
+ socklen = sizeof(struct sockaddr_in6);
+ }
+ ret = bufferevent_socket_connect(evcon->bufev, sa, socklen);
+ } else {
+ ret = bufferevent_socket_connect_hostname(evcon->bufev,
+ evcon->dns_base, evcon->ai_family, address, evcon->port);
+ }
+
+ if (ret < 0) {
+ evcon->state = old_state;
+ event_sock_warn(evcon->fd, "%s: connection to \"%s\" failed",
+ __func__, evcon->address);
+ /* Some operating systems return ECONNREFUSED immediately
+ * when connecting to a local address. The cleanup is going
+ * to reschedule this function call.
+ */
+ evhttp_connection_cb_cleanup(evcon);
+ return (0);
+ }
+
+ return (0);
+}
+
+/*
+ * Starts an HTTP request on the provided evhttp_connection object.
+ * If the connection object is not connected to the web server already,
+ * this will start the connection.
+ */
+
+int
+evhttp_make_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req,
+ enum evhttp_cmd_type type, const char *uri)
+{
+ /* We are making a request */
+ req->kind = EVHTTP_REQUEST;
+ req->type = type;
+ if (req->uri != NULL)
+ mm_free(req->uri);
+ if ((req->uri = mm_strdup(uri)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ evhttp_request_free_auto(req);
+ return (-1);
+ }
+
+ /* Set the protocol version if it is not supplied */
+ if (!req->major && !req->minor) {
+ req->major = 1;
+ req->minor = 1;
+ }
+
+ EVUTIL_ASSERT(req->evcon == NULL);
+ req->evcon = evcon;
+ EVUTIL_ASSERT(!(req->flags & EVHTTP_REQ_OWN_CONNECTION));
+
+ TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+ /* If the connection object is not connected; make it so */
+ if (!evhttp_connected(evcon)) {
+ int res = evhttp_connection_connect_(evcon);
+ /* evhttp_connection_fail_(), which is called through
+ * evhttp_connection_connect_(), assumes that req lies in
+ * evcon->requests. Thus, enqueue the request in advance and
+ * remove it in the error case. */
+ if (res != 0)
+ TAILQ_REMOVE(&evcon->requests, req, next);
+
+ return res;
+ }
+
+ /*
+ * If it's connected already and we are the first in the queue,
+ * then we can dispatch this request immediately. Otherwise, it
+ * will be dispatched once the pending requests are completed.
+ */
+ if (TAILQ_FIRST(&evcon->requests) == req)
+ evhttp_request_dispatch(evcon);
+
+ return (0);
+}
+
+void
+evhttp_cancel_request(struct evhttp_request *req)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ if (evcon != NULL) {
+ /* We need to remove it from the connection */
+ if (TAILQ_FIRST(&evcon->requests) == req) {
+ /* it's currently being worked on, so reset
+ * the connection.
+ */
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_REQUEST_CANCEL);
+
+ /* connection fail freed the request */
+ return;
+ } else {
+ /* otherwise, we can just remove it from the
+ * queue
+ */
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ }
+ }
+
+ evhttp_request_free_auto(req);
+}
+
+/*
+ * Reads data from the file descriptor into the request structure.
+ * The request structure needs to be set up correctly.
+ */
+
+void
+evhttp_start_read_(struct evhttp_connection *evcon)
+{
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+ bufferevent_enable(evcon->bufev, EV_READ);
+
+ evcon->state = EVCON_READING_FIRSTLINE;
+ /* Reset the bufferevent callbacks */
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ /* If there's still data pending, process it next time through the
+ * loop. Don't do it now; that could get recursive. */
+ if (evbuffer_get_length(bufferevent_get_input(evcon->bufev))) {
+ event_deferred_cb_schedule_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+ }
+}
+
+static void
+evhttp_send_done(struct evhttp_connection *evcon, void *arg)
+{
+ int need_close;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ TAILQ_REMOVE(&evcon->requests, req, next);
+
+ if (req->on_complete_cb != NULL) {
+ req->on_complete_cb(req, req->on_complete_cb_arg);
+ }
+
+ need_close =
+ (REQ_VERSION_BEFORE(req, 1, 1) &&
+ !evhttp_is_connection_keepalive(req->input_headers)) ||
+ evhttp_is_request_connection_close(req);
+
+ EVUTIL_ASSERT(req->flags & EVHTTP_REQ_OWN_CONNECTION);
+ evhttp_request_free(req);
+
+ if (need_close) {
+ evhttp_connection_free(evcon);
+ return;
+ }
+
+ /* we have a persistent connection; try to accept another request. */
+ if (evhttp_associate_new_request_with_connection(evcon) == -1) {
+ evhttp_connection_free(evcon);
+ }
+}
+
+/*
+ * Sends an error page back to the client.
+ */
+
+void
+evhttp_send_error(struct evhttp_request *req, int error, const char *reason)
+{
+
+#define ERR_FORMAT "<HTML><HEAD>\n" \
+ "<TITLE>%d %s</TITLE>\n" \
+ "</HEAD><BODY>\n" \
+ "<H1>%s</H1>\n" \
+ "</BODY></HTML>\n"
+
+ struct evbuffer *buf = evbuffer_new();
+ if (buf == NULL) {
+ /* if we cannot allocate memory, we just drop the connection */
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+ if (reason == NULL) {
+ reason = evhttp_response_phrase_internal(error);
+ }
+
+ evhttp_response_code_(req, error, reason);
+
+ evbuffer_add_printf(buf, ERR_FORMAT, error, reason, reason);
+
+ evhttp_send_page_(req, buf);
+
+ evbuffer_free(buf);
+#undef ERR_FORMAT
+}
+
+/* Requires that headers and response code are already set up */
+
+static inline void
+evhttp_send(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ struct evhttp_connection *evcon = req->evcon;
+
+ if (evcon == NULL) {
+ evhttp_request_free(req);
+ return;
+ }
+
+ EVUTIL_ASSERT(TAILQ_FIRST(&evcon->requests) == req);
+
+ /* we expect no more calls from the user on this request */
+ req->userdone = 1;
+
+ /* xxx: not sure if we really should expose the data buffer this way */
+ if (databuf != NULL)
+ evbuffer_add_buffer(req->output_buffer, databuf);
+
+ /* Adds headers to the response */
+ evhttp_make_header(evcon, req);
+
+ evhttp_write_buffer(evcon, evhttp_send_done, NULL);
+}
+
+void
+evhttp_send_reply(struct evhttp_request *req, int code, const char *reason,
+ struct evbuffer *databuf)
+{
+ evhttp_response_code_(req, code, reason);
+
+ evhttp_send(req, databuf);
+}
+
+void
+evhttp_send_reply_start(struct evhttp_request *req, int code,
+ const char *reason)
+{
+ evhttp_response_code_(req, code, reason);
+ if (evhttp_find_header(req->output_headers, "Content-Length") == NULL &&
+ REQ_VERSION_ATLEAST(req, 1, 1) &&
+ evhttp_response_needs_body(req)) {
+ /*
+ * prefer HTTP/1.1 chunked encoding to closing the connection;
+ * note RFC 2616 section 4.4 forbids it with Content-Length:
+ * and it's not necessary then anyway.
+ */
+ evhttp_add_header(req->output_headers, "Transfer-Encoding",
+ "chunked");
+ req->chunked = 1;
+ } else {
+ req->chunked = 0;
+ }
+ evhttp_make_header(req->evcon, req);
+ evhttp_write_buffer(req->evcon, NULL, NULL);
+}
+
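+/*
+ * Writes one chunk of a streamed reply.  For chunked transfer-encoding
+ * the data is framed as a hexadecimal length line, the payload, and a
+ * trailing CRLF.  Empty buffers and bodiless responses are ignored.
+ */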
+void
+evhttp_send_reply_chunk_with_cb(struct evhttp_request *req, struct evbuffer *databuf,
+ void (*cb)(struct evhttp_connection *, void *), void *arg)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ struct evbuffer *output;
+
+ if (evcon == NULL)
+ return;
+
+ output = bufferevent_get_output(evcon->bufev);
+
+ if (evbuffer_get_length(databuf) == 0)
+ return;
+ if (!evhttp_response_needs_body(req))
+ return;
+ if (req->chunked) {
+ evbuffer_add_printf(output, "%x\r\n",
+ (unsigned)evbuffer_get_length(databuf));
+ }
+ evbuffer_add_buffer(output, databuf);
+ if (req->chunked) {
+ evbuffer_add(output, "\r\n", 2);
+ }
+ evhttp_write_buffer(evcon, cb, arg);
+}
+
+void
+evhttp_send_reply_chunk(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ evhttp_send_reply_chunk_with_cb(req, databuf, NULL, NULL);
+}
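+
+/*
+ * Finishes a streamed reply.  For a chunked reply the terminating
+ * "0 CRLF CRLF" chunk is written; otherwise evhttp_send_done() runs once
+ * any pending output has been flushed.
+ */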
+void
+evhttp_send_reply_end(struct evhttp_request *req)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ struct evbuffer *output;
+
+ if (evcon == NULL) {
+ evhttp_request_free(req);
+ return;
+ }
+
+ output = bufferevent_get_output(evcon->bufev);
+
+ /* we expect no more calls from the user on this request */
+ req->userdone = 1;
+
+ if (req->chunked) {
+ evbuffer_add(output, "0\r\n\r\n", 5);
+ evhttp_write_buffer(req->evcon, evhttp_send_done, NULL);
+ req->chunked = 0;
+ } else if (evbuffer_get_length(output) == 0) {
+ /* let the connection know that we are done with the request */
+ evhttp_send_done(evcon, NULL);
+ } else {
+ /* make the callback execute after all data has been written */
+ evcon->cb = evhttp_send_done;
+ evcon->cb_arg = NULL;
+ }
+}
+
+static const char *informational_phrases[] = {
+ /* 100 */ "Continue",
+ /* 101 */ "Switching Protocols"
+};
+
+static const char *success_phrases[] = {
+ /* 200 */ "OK",
+ /* 201 */ "Created",
+ /* 202 */ "Accepted",
+ /* 203 */ "Non-Authoritative Information",
+ /* 204 */ "No Content",
+ /* 205 */ "Reset Content",
+ /* 206 */ "Partial Content"
+};
+
+static const char *redirection_phrases[] = {
+ /* 300 */ "Multiple Choices",
+ /* 301 */ "Moved Permanently",
+ /* 302 */ "Found",
+ /* 303 */ "See Other",
+ /* 304 */ "Not Modified",
+ /* 305 */ "Use Proxy",
+ /* 307 */ "Temporary Redirect"
+};
+
+static const char *client_error_phrases[] = {
+ /* 400 */ "Bad Request",
+ /* 401 */ "Unauthorized",
+ /* 402 */ "Payment Required",
+ /* 403 */ "Forbidden",
+ /* 404 */ "Not Found",
+ /* 405 */ "Method Not Allowed",
+ /* 406 */ "Not Acceptable",
+ /* 407 */ "Proxy Authentication Required",
+ /* 408 */ "Request Time-out",
+ /* 409 */ "Conflict",
+ /* 410 */ "Gone",
+ /* 411 */ "Length Required",
+ /* 412 */ "Precondition Failed",
+ /* 413 */ "Request Entity Too Large",
+ /* 414 */ "Request-URI Too Large",
+ /* 415 */ "Unsupported Media Type",
+ /* 416 */ "Requested range not satisfiable",
+ /* 417 */ "Expectation Failed"
+};
+
+static const char *server_error_phrases[] = {
+ /* 500 */ "Internal Server Error",
+ /* 501 */ "Not Implemented",
+ /* 502 */ "Bad Gateway",
+ /* 503 */ "Service Unavailable",
+ /* 504 */ "Gateway Time-out",
+ /* 505 */ "HTTP Version not supported"
+};
+
+struct response_class {
+ const char *name;
+ size_t num_responses;
+ const char **responses;
+};
+
+#ifndef MEMBERSOF
+#define MEMBERSOF(x) (sizeof(x)/sizeof(x[0]))
+#endif
+
+static const struct response_class response_classes[] = {
+ /* 1xx */ { "Informational", MEMBERSOF(informational_phrases), informational_phrases },
+ /* 2xx */ { "Success", MEMBERSOF(success_phrases), success_phrases },
+ /* 3xx */ { "Redirection", MEMBERSOF(redirection_phrases), redirection_phrases },
+ /* 4xx */ { "Client Error", MEMBERSOF(client_error_phrases), client_error_phrases },
+ /* 5xx */ { "Server Error", MEMBERSOF(server_error_phrases), server_error_phrases }
+};
+
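+/*
+ * Maps a status code to its reason phrase, falling back to the class
+ * name (e.g. "Client Error") when there is no specific entry.
+ */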
+static const char *
+evhttp_response_phrase_internal(int code)
+{
+ int klass = code / 100 - 1;
+ int subcode = code % 100;
+
+ /* Unknown class - can't do any better here */
+ if (klass < 0 || klass >= (int) MEMBERSOF(response_classes))
+ return "Unknown Status Class";
+
+ /* Unknown sub-code, return class name at least */
+ if (subcode >= (int) response_classes[klass].num_responses)
+ return response_classes[klass].name;
+
+ return response_classes[klass].responses[subcode];
+}
+
+void
+evhttp_response_code_(struct evhttp_request *req, int code, const char *reason)
+{
+ req->kind = EVHTTP_RESPONSE;
+ req->response_code = code;
+ if (req->response_code_line != NULL)
+ mm_free(req->response_code_line);
+ if (reason == NULL)
+ reason = evhttp_response_phrase_internal(code);
+ req->response_code_line = mm_strdup(reason);
+ if (req->response_code_line == NULL) {
+ event_warn("%s: strdup", __func__);
+ /* XXX what else can we do? */
+ }
+}
+
+void
+evhttp_send_page_(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ if (!req->major || !req->minor) {
+ req->major = 1;
+ req->minor = 1;
+ }
+
+ if (req->kind != EVHTTP_RESPONSE)
+ evhttp_response_code_(req, 200, "OK");
+
+ evhttp_clear_headers(req->output_headers);
+ evhttp_add_header(req->output_headers, "Content-Type", "text/html");
+ evhttp_add_header(req->output_headers, "Connection", "close");
+
+ evhttp_send(req, databuf);
+}
+
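+/*
+ * Bytes that may appear unescaped in a URI: the RFC 3986 "unreserved"
+ * set (ALPHA / DIGIT / "-" / "." / "_" / "~").  Everything else is
+ * percent-encoded by evhttp_uriencode().
+ */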
+static const char uri_chars[256] = {
+ /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ /* 64 */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0,
+ /* 128 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 192 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+#define CHAR_IS_UNRESERVED(c) \
+ (uri_chars[(unsigned char)(c)])
+
+/*
+ * Helper functions to encode/decode a string for inclusion in a URI.
+ * The returned string must be freed by the caller.
+ */
+char *
+evhttp_uriencode(const char *uri, ev_ssize_t len, int space_as_plus)
+{
+ struct evbuffer *buf = evbuffer_new();
+ const char *p, *end;
+ char *result;
+
+ if (buf == NULL)
+ return (NULL);
+
+ if (len >= 0)
+ end = uri+len;
+ else
+ end = uri+strlen(uri);
+
+ for (p = uri; p < end; p++) {
+ if (CHAR_IS_UNRESERVED(*p)) {
+ evbuffer_add(buf, p, 1);
+ } else if (*p == ' ' && space_as_plus) {
+ evbuffer_add(buf, "+", 1);
+ } else {
+ evbuffer_add_printf(buf, "%%%02X", (unsigned char)(*p));
+ }
+ }
+ evbuffer_add(buf, "", 1); /* NUL-terminator. */
+ result = mm_malloc(evbuffer_get_length(buf));
+ if (result)
+ evbuffer_remove(buf, result, evbuffer_get_length(buf));
+ evbuffer_free(buf);
+
+ return (result);
+}
+
+char *
+evhttp_encode_uri(const char *str)
+{
+ return evhttp_uriencode(str, -1, 0);
+}
+
+/*
+ * @param decode_plus_ctl: if 1, we decode plus into space. If 0, we don't.
+ * If -1, when true we transform plus to space only after we've seen
+ * a ?. -1 is deprecated.
+ * @return the number of bytes written to 'ret'.
+ */
+int
+evhttp_decode_uri_internal(
+ const char *uri, size_t length, char *ret, int decode_plus_ctl)
+{
+ char c;
+ int j;
+ int decode_plus = (decode_plus_ctl == 1) ? 1: 0;
+ unsigned i;
+
+ for (i = j = 0; i < length; i++) {
+ c = uri[i];
+ if (c == '?') {
+ if (decode_plus_ctl < 0)
+ decode_plus = 1;
+ } else if (c == '+' && decode_plus) {
+ c = ' ';
+ } else if ((i + 2) < length && c == '%' &&
+ EVUTIL_ISXDIGIT_(uri[i+1]) && EVUTIL_ISXDIGIT_(uri[i+2])) {
+ char tmp[3];
+ tmp[0] = uri[i+1];
+ tmp[1] = uri[i+2];
+ tmp[2] = '\0';
+ c = (char)strtol(tmp, NULL, 16);
+ i += 2;
+ }
+ ret[j++] = c;
+ }
+ ret[j] = '\0';
+
+ return (j);
+}
+
+/* deprecated */
+char *
+evhttp_decode_uri(const char *uri)
+{
+ char *ret;
+
+ if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(strlen(uri) + 1));
+ return (NULL);
+ }
+
+ evhttp_decode_uri_internal(uri, strlen(uri),
+ ret, -1 /*always_decode_plus*/);
+
+ return (ret);
+}
+
+char *
+evhttp_uridecode(const char *uri, int decode_plus, size_t *size_out)
+{
+ char *ret;
+ int n;
+
+ if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(strlen(uri) + 1));
+ return (NULL);
+ }
+
+ n = evhttp_decode_uri_internal(uri, strlen(uri),
+ ret, !!decode_plus/*always_decode_plus*/);
+
+ if (size_out) {
+ EVUTIL_ASSERT(n >= 0);
+ *size_out = (size_t)n;
+ }
+
+ return (ret);
+}
+
+/*
+ * Helper function to parse out the arguments of a query string.
+ * Arguments are separated by '&'; each argument is split into a
+ * key and a value at the first '='.
+ */
+
+static int
+evhttp_parse_query_impl(const char *str, struct evkeyvalq *headers,
+ int is_whole_uri)
+{
+ char *line=NULL;
+ char *argument;
+ char *p;
+ const char *query_part;
+ int result = -1;
+ struct evhttp_uri *uri=NULL;
+
+ TAILQ_INIT(headers);
+
+ if (is_whole_uri) {
+ uri = evhttp_uri_parse(str);
+ if (!uri)
+ goto error;
+ query_part = evhttp_uri_get_query(uri);
+ } else {
+ query_part = str;
+ }
+
+ /* No arguments - we are done */
+ if (!query_part || !strlen(query_part)) {
+ result = 0;
+ goto done;
+ }
+
+ if ((line = mm_strdup(query_part)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto error;
+ }
+
+ p = argument = line;
+ while (p != NULL && *p != '\0') {
+ char *key, *value, *decoded_value;
+ argument = strsep(&p, "&");
+
+ value = argument;
+ key = strsep(&value, "=");
+ if (value == NULL || *key == '\0') {
+ goto error;
+ }
+
+ if ((decoded_value = mm_malloc(strlen(value) + 1)) == NULL) {
+ event_warn("%s: mm_malloc", __func__);
+ goto error;
+ }
+ evhttp_decode_uri_internal(value, strlen(value),
+ decoded_value, 1 /*always_decode_plus*/);
+ event_debug(("Query Param: %s -> %s\n", key, decoded_value));
+ evhttp_add_header_internal(headers, key, decoded_value);
+ mm_free(decoded_value);
+ }
+
+ result = 0;
+ goto done;
+error:
+ evhttp_clear_headers(headers);
+done:
+ if (line)
+ mm_free(line);
+ if (uri)
+ evhttp_uri_free(uri);
+ return result;
+}
+
+int
+evhttp_parse_query(const char *uri, struct evkeyvalq *headers)
+{
+ return evhttp_parse_query_impl(uri, headers, 1);
+}
+int
+evhttp_parse_query_str(const char *uri, struct evkeyvalq *headers)
+{
+ return evhttp_parse_query_impl(uri, headers, 0);
+}
+
+static struct evhttp_cb *
+evhttp_dispatch_callback(struct httpcbq *callbacks, struct evhttp_request *req)
+{
+ struct evhttp_cb *cb;
+ size_t offset = 0;
+ char *translated;
+ const char *path;
+
+ /* Decode the request path and look for a callback registered for it */
+ path = evhttp_uri_get_path(req->uri_elems);
+ offset = strlen(path);
+ if ((translated = mm_malloc(offset + 1)) == NULL)
+ return (NULL);
+ evhttp_decode_uri_internal(path, offset, translated,
+ 0 /* decode_plus */);
+
+ TAILQ_FOREACH(cb, callbacks, next) {
+ if (!strcmp(cb->what, translated)) {
+ mm_free(translated);
+ return (cb);
+ }
+ }
+
+ mm_free(translated);
+ return (NULL);
+}
+
+
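+/*
+ * Matches name against pattern, where '*' matches any (possibly empty)
+ * run of characters; used for virtual host patterns such as
+ * "*.example.com".
+ */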
+static int
+prefix_suffix_match(const char *pattern, const char *name, int ignorecase)
+{
+ char c;
+
+ while (1) {
+ switch (c = *pattern++) {
+ case '\0':
+ return *name == '\0';
+
+ case '*':
+ while (*name != '\0') {
+ if (prefix_suffix_match(pattern, name,
+ ignorecase))
+ return (1);
+ ++name;
+ }
+ return (0);
+ default:
+ if (c != *name) {
+ if (!ignorecase ||
+ EVUTIL_TOLOWER_(c) != EVUTIL_TOLOWER_(*name))
+ return (0);
+ }
+ ++name;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ Search the vhost hierarchy beginning with http for a server alias
+ matching hostname. If a match is found, and outhttp is non-null,
+ outhttp is set to the matching http object and 1 is returned.
+*/
+
+static int
+evhttp_find_alias(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname)
+{
+ struct evhttp_server_alias *alias;
+ struct evhttp *vhost;
+
+ TAILQ_FOREACH(alias, &http->aliases, next) {
+ /* XXX Do we need to handle IP addresses? */
+ if (!evutil_ascii_strcasecmp(alias->alias, hostname)) {
+ if (outhttp)
+ *outhttp = http;
+ return 1;
+ }
+ }
+
+ /* XXX It might be good to avoid recursion here, but I don't
+ see a way to do that w/o a list. */
+ TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) {
+ if (evhttp_find_alias(vhost, outhttp, hostname))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ Attempts to find the best http object to handle a request for a hostname.
+ All aliases for the root http object and vhosts are searched for an exact
+ match. Then, the vhost hierarchy is traversed again for a matching
+ pattern.
+
+ If an alias or vhost is matched, 1 is returned, and outhttp, if non-null,
+ is set with the best matching http object. If there are no matches, the
+ root http object is stored in outhttp and 0 is returned.
+*/
+
+static int
+evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname)
+{
+ struct evhttp *vhost;
+ struct evhttp *oldhttp;
+ int match_found = 0;
+
+ if (evhttp_find_alias(http, outhttp, hostname))
+ return 1;
+
+ do {
+ oldhttp = http;
+ TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) {
+ if (prefix_suffix_match(vhost->vhost_pattern,
+ hostname, 1 /* ignorecase */)) {
+ http = vhost;
+ match_found = 1;
+ break;
+ }
+ }
+ } while (oldhttp != http);
+
+ if (outhttp)
+ *outhttp = http;
+
+ return match_found;
+}
+
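+/*
+ * Entry point for a fully parsed incoming request: rejects unknown or
+ * disallowed methods, resolves the virtual host, and then dispatches to
+ * the matching path callback, the generic callback, or a 404 page.
+ */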
+static void
+evhttp_handle_request(struct evhttp_request *req, void *arg)
+{
+ struct evhttp *http = arg;
+ struct evhttp_cb *cb = NULL;
+ const char *hostname;
+
+ /* we have a new request on which the user needs to take action */
+ req->userdone = 0;
+
+ if (req->type == 0 || req->uri == NULL) {
+ evhttp_send_error(req, HTTP_BADREQUEST, NULL);
+ return;
+ }
+
+ if ((http->allowed_methods & req->type) == 0) {
+ event_debug(("Rejecting disallowed method %x (allowed: %x)\n",
+ (unsigned)req->type, (unsigned)http->allowed_methods));
+ evhttp_send_error(req, HTTP_NOTIMPLEMENTED, NULL);
+ return;
+ }
+
+ /* handle potential virtual hosts */
+ hostname = evhttp_request_get_host(req);
+ if (hostname != NULL) {
+ evhttp_find_vhost(http, &http, hostname);
+ }
+
+ if ((cb = evhttp_dispatch_callback(&http->callbacks, req)) != NULL) {
+ (*cb->cb)(req, cb->cbarg);
+ return;
+ }
+
+ /* Generic callback */
+ if (http->gencb) {
+ (*http->gencb)(req, http->gencbarg);
+ return;
+ } else {
+ /* We need to send a 404 here */
+#define ERR_FORMAT "<html><head>" \
+ "<title>404 Not Found</title>" \
+ "</head><body>" \
+ "<h1>Not Found</h1>" \
+ "<p>The requested URL %s was not found on this server.</p>"\
+ "</body></html>\n"
+
+ char *escaped_html;
+ struct evbuffer *buf;
+
+ if ((escaped_html = evhttp_htmlescape(req->uri)) == NULL) {
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+
+ if ((buf = evbuffer_new()) == NULL) {
+ mm_free(escaped_html);
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+
+ evhttp_response_code_(req, HTTP_NOTFOUND, "Not Found");
+
+ evbuffer_add_printf(buf, ERR_FORMAT, escaped_html);
+
+ mm_free(escaped_html);
+
+ evhttp_send_page_(req, buf);
+
+ evbuffer_free(buf);
+#undef ERR_FORMAT
+ }
+}
+
+/* Listener callback when a connection arrives at a server. */
+static void
+accept_socket_cb(struct evconnlistener *listener, evutil_socket_t nfd, struct sockaddr *peer_sa, int peer_socklen, void *arg)
+{
+ struct evhttp *http = arg;
+
+ evhttp_get_request(http, nfd, peer_sa, peer_socklen);
+}
+
+int
+evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port)
+{
+ struct evhttp_bound_socket *bound =
+ evhttp_bind_socket_with_handle(http, address, port);
+ if (bound == NULL)
+ return (-1);
+ return (0);
+}
+
+struct evhttp_bound_socket *
+evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port)
+{
+ evutil_socket_t fd;
+ struct evhttp_bound_socket *bound;
+
+ if ((fd = bind_socket(address, port, 1 /*reuse*/)) == -1)
+ return (NULL);
+
+ if (listen(fd, 128) == -1) {
+ event_sock_warn(fd, "%s: listen", __func__);
+ evutil_closesocket(fd);
+ return (NULL);
+ }
+
+ bound = evhttp_accept_socket_with_handle(http, fd);
+
+ if (bound != NULL) {
+ event_debug(("Bound to port %d - Awaiting connections ... ",
+ port));
+ return (bound);
+ }
+
+ return (NULL);
+}
+
+int
+evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd)
+{
+ struct evhttp_bound_socket *bound =
+ evhttp_accept_socket_with_handle(http, fd);
+ if (bound == NULL)
+ return (-1);
+ return (0);
+}
+
+void
+evhttp_foreach_bound_socket(struct evhttp *http,
+ evhttp_bound_socket_foreach_fn *function,
+ void *argument)
+{
+ struct evhttp_bound_socket *bound;
+
+ TAILQ_FOREACH(bound, &http->sockets, next)
+ function(bound, argument);
+}
+
+struct evhttp_bound_socket *
+evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd)
+{
+ struct evhttp_bound_socket *bound;
+ struct evconnlistener *listener;
+ const int flags =
+ LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC|LEV_OPT_CLOSE_ON_FREE;
+
+ listener = evconnlistener_new(http->base, NULL, NULL,
+ flags,
+ 0, /* Backlog is '0' because we already said 'listen' */
+ fd);
+ if (!listener)
+ return (NULL);
+
+ bound = evhttp_bind_listener(http, listener);
+ if (!bound) {
+ evconnlistener_free(listener);
+ return (NULL);
+ }
+ return (bound);
+}
+
+struct evhttp_bound_socket *
+evhttp_bind_listener(struct evhttp *http, struct evconnlistener *listener)
+{
+ struct evhttp_bound_socket *bound;
+
+ bound = mm_malloc(sizeof(struct evhttp_bound_socket));
+ if (bound == NULL)
+ return (NULL);
+
+ bound->listener = listener;
+ TAILQ_INSERT_TAIL(&http->sockets, bound, next);
+
+ evconnlistener_set_cb(listener, accept_socket_cb, http);
+ return bound;
+}
+
+evutil_socket_t
+evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound)
+{
+ return evconnlistener_get_fd(bound->listener);
+}
+
+struct evconnlistener *
+evhttp_bound_socket_get_listener(struct evhttp_bound_socket *bound)
+{
+ return bound->listener;
+}
+
+void
+evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound)
+{
+ TAILQ_REMOVE(&http->sockets, bound, next);
+ evconnlistener_free(bound->listener);
+ mm_free(bound);
+}
+
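+/*
+ * Allocates an evhttp object and installs the defaults: unlimited header
+ * and body sizes, "text/html; charset=ISO-8859-1" as the default content
+ * type, and GET, POST, HEAD, PUT and DELETE as the allowed methods.
+ */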
+static struct evhttp*
+evhttp_new_object(void)
+{
+ struct evhttp *http = NULL;
+
+ if ((http = mm_calloc(1, sizeof(struct evhttp))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (NULL);
+ }
+
+ evutil_timerclear(&http->timeout);
+ evhttp_set_max_headers_size(http, EV_SIZE_MAX);
+ evhttp_set_max_body_size(http, EV_SIZE_MAX);
+ evhttp_set_default_content_type(http, "text/html; charset=ISO-8859-1");
+ evhttp_set_allowed_methods(http,
+ EVHTTP_REQ_GET |
+ EVHTTP_REQ_POST |
+ EVHTTP_REQ_HEAD |
+ EVHTTP_REQ_PUT |
+ EVHTTP_REQ_DELETE);
+
+ TAILQ_INIT(&http->sockets);
+ TAILQ_INIT(&http->callbacks);
+ TAILQ_INIT(&http->connections);
+ TAILQ_INIT(&http->virtualhosts);
+ TAILQ_INIT(&http->aliases);
+
+ return (http);
+}
+
+struct evhttp *
+evhttp_new(struct event_base *base)
+{
+ struct evhttp *http = NULL;
+
+ http = evhttp_new_object();
+ if (http == NULL)
+ return (NULL);
+ http->base = base;
+
+ return (http);
+}
+
+/*
+ * Start a web server on the specified address and port.
+ */
+
+struct evhttp *
+evhttp_start(const char *address, unsigned short port)
+{
+ struct evhttp *http = NULL;
+
+ http = evhttp_new_object();
+ if (http == NULL)
+ return (NULL);
+ if (evhttp_bind_socket(http, address, port) == -1) {
+ mm_free(http);
+ return (NULL);
+ }
+
+ return (http);
+}
+
+void
+evhttp_free(struct evhttp* http)
+{
+ struct evhttp_cb *http_cb;
+ struct evhttp_connection *evcon;
+ struct evhttp_bound_socket *bound;
+ struct evhttp* vhost;
+ struct evhttp_server_alias *alias;
+
+ /* Remove the accepting part */
+ while ((bound = TAILQ_FIRST(&http->sockets)) != NULL) {
+ TAILQ_REMOVE(&http->sockets, bound, next);
+
+ evconnlistener_free(bound->listener);
+
+ mm_free(bound);
+ }
+
+ while ((evcon = TAILQ_FIRST(&http->connections)) != NULL) {
+ /* evhttp_connection_free removes the connection */
+ evhttp_connection_free(evcon);
+ }
+
+ while ((http_cb = TAILQ_FIRST(&http->callbacks)) != NULL) {
+ TAILQ_REMOVE(&http->callbacks, http_cb, next);
+ mm_free(http_cb->what);
+ mm_free(http_cb);
+ }
+
+ while ((vhost = TAILQ_FIRST(&http->virtualhosts)) != NULL) {
+ TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost);
+
+ evhttp_free(vhost);
+ }
+
+ if (http->vhost_pattern != NULL)
+ mm_free(http->vhost_pattern);
+
+ while ((alias = TAILQ_FIRST(&http->aliases)) != NULL) {
+ TAILQ_REMOVE(&http->aliases, alias, next);
+ mm_free(alias->alias);
+ mm_free(alias);
+ }
+
+ mm_free(http);
+}
+
+int
+evhttp_add_virtual_host(struct evhttp* http, const char *pattern,
+ struct evhttp* vhost)
+{
+ /* a vhost can only be a vhost once and should not have bound sockets */
+ if (vhost->vhost_pattern != NULL ||
+ TAILQ_FIRST(&vhost->sockets) != NULL)
+ return (-1);
+
+ vhost->vhost_pattern = mm_strdup(pattern);
+ if (vhost->vhost_pattern == NULL)
+ return (-1);
+
+ TAILQ_INSERT_TAIL(&http->virtualhosts, vhost, next_vhost);
+
+ return (0);
+}
+
+int
+evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost)
+{
+ if (vhost->vhost_pattern == NULL)
+ return (-1);
+
+ TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost);
+
+ mm_free(vhost->vhost_pattern);
+ vhost->vhost_pattern = NULL;
+
+ return (0);
+}
+
+int
+evhttp_add_server_alias(struct evhttp *http, const char *alias)
+{
+ struct evhttp_server_alias *evalias;
+
+ evalias = mm_calloc(1, sizeof(*evalias));
+ if (!evalias)
+ return -1;
+
+ evalias->alias = mm_strdup(alias);
+ if (!evalias->alias) {
+ mm_free(evalias);
+ return -1;
+ }
+
+ TAILQ_INSERT_TAIL(&http->aliases, evalias, next);
+
+ return 0;
+}
+
+int
+evhttp_remove_server_alias(struct evhttp *http, const char *alias)
+{
+ struct evhttp_server_alias *evalias;
+
+ TAILQ_FOREACH(evalias, &http->aliases, next) {
+ if (evutil_ascii_strcasecmp(evalias->alias, alias) == 0) {
+ TAILQ_REMOVE(&http->aliases, evalias, next);
+ mm_free(evalias->alias);
+ mm_free(evalias);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+void
+evhttp_set_timeout(struct evhttp* http, int timeout_in_secs)
+{
+ if (timeout_in_secs == -1) {
+ evhttp_set_timeout_tv(http, NULL);
+ } else {
+ struct timeval tv;
+ tv.tv_sec = timeout_in_secs;
+ tv.tv_usec = 0;
+ evhttp_set_timeout_tv(http, &tv);
+ }
+}
+
+void
+evhttp_set_timeout_tv(struct evhttp* http, const struct timeval* tv)
+{
+ if (tv) {
+ http->timeout = *tv;
+ } else {
+ evutil_timerclear(&http->timeout);
+ }
+}
+
+void
+evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size)
+{
+ if (max_headers_size < 0)
+ http->default_max_headers_size = EV_SIZE_MAX;
+ else
+ http->default_max_headers_size = max_headers_size;
+}
+
+void
+evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size)
+{
+ if (max_body_size < 0)
+ http->default_max_body_size = EV_UINT64_MAX;
+ else
+ http->default_max_body_size = max_body_size;
+}
+
+void
+evhttp_set_default_content_type(struct evhttp *http,
+ const char *content_type) {
+ http->default_content_type = content_type;
+}
+
+void
+evhttp_set_allowed_methods(struct evhttp* http, ev_uint16_t methods)
+{
+ http->allowed_methods = methods;
+}
+
+int
+evhttp_set_cb(struct evhttp *http, const char *uri,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ struct evhttp_cb *http_cb;
+
+ TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+ if (strcmp(http_cb->what, uri) == 0)
+ return (-1);
+ }
+
+ if ((http_cb = mm_calloc(1, sizeof(struct evhttp_cb))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (-2);
+ }
+
+ http_cb->what = mm_strdup(uri);
+ if (http_cb->what == NULL) {
+ event_warn("%s: strdup", __func__);
+ mm_free(http_cb);
+ return (-3);
+ }
+ http_cb->cb = cb;
+ http_cb->cbarg = cbarg;
+
+ TAILQ_INSERT_TAIL(&http->callbacks, http_cb, next);
+
+ return (0);
+}
+
+int
+evhttp_del_cb(struct evhttp *http, const char *uri)
+{
+ struct evhttp_cb *http_cb;
+
+ TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+ if (strcmp(http_cb->what, uri) == 0)
+ break;
+ }
+ if (http_cb == NULL)
+ return (-1);
+
+ TAILQ_REMOVE(&http->callbacks, http_cb, next);
+ mm_free(http_cb->what);
+ mm_free(http_cb);
+
+ return (0);
+}
+
+void
+evhttp_set_gencb(struct evhttp *http,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ http->gencb = cb;
+ http->gencbarg = cbarg;
+}
+
+void
+evhttp_set_bevcb(struct evhttp *http,
+ struct bufferevent* (*cb)(struct event_base *, void *), void *cbarg)
+{
+ http->bevcb = cb;
+ http->bevcbarg = cbarg;
+}
+
+/*
+ * Request related functions
+ */
+
+struct evhttp_request *
+evhttp_request_new(void (*cb)(struct evhttp_request *, void *), void *arg)
+{
+ struct evhttp_request *req = NULL;
+
+ /* Allocate request structure */
+ if ((req = mm_calloc(1, sizeof(struct evhttp_request))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+
+ req->headers_size = 0;
+ req->body_size = 0;
+
+ req->kind = EVHTTP_RESPONSE;
+ req->input_headers = mm_calloc(1, sizeof(struct evkeyvalq));
+ if (req->input_headers == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+ TAILQ_INIT(req->input_headers);
+
+ req->output_headers = mm_calloc(1, sizeof(struct evkeyvalq));
+ if (req->output_headers == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+ TAILQ_INIT(req->output_headers);
+
+ if ((req->input_buffer = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new", __func__);
+ goto error;
+ }
+
+ if ((req->output_buffer = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new", __func__);
+ goto error;
+ }
+
+ req->cb = cb;
+ req->cb_arg = arg;
+
+ return (req);
+
+ error:
+ if (req != NULL)
+ evhttp_request_free(req);
+ return (NULL);
+}
+
+void
+evhttp_request_free(struct evhttp_request *req)
+{
+ if ((req->flags & EVHTTP_REQ_DEFER_FREE) != 0) {
+ req->flags |= EVHTTP_REQ_NEEDS_FREE;
+ return;
+ }
+
+ if (req->remote_host != NULL)
+ mm_free(req->remote_host);
+ if (req->uri != NULL)
+ mm_free(req->uri);
+ if (req->uri_elems != NULL)
+ evhttp_uri_free(req->uri_elems);
+ if (req->response_code_line != NULL)
+ mm_free(req->response_code_line);
+ if (req->host_cache != NULL)
+ mm_free(req->host_cache);
+
+ evhttp_clear_headers(req->input_headers);
+ mm_free(req->input_headers);
+
+ evhttp_clear_headers(req->output_headers);
+ mm_free(req->output_headers);
+
+ if (req->input_buffer != NULL)
+ evbuffer_free(req->input_buffer);
+
+ if (req->output_buffer != NULL)
+ evbuffer_free(req->output_buffer);
+
+ mm_free(req);
+}
+
+void
+evhttp_request_own(struct evhttp_request *req)
+{
+ req->flags |= EVHTTP_USER_OWNED;
+}
+
+int
+evhttp_request_is_owned(struct evhttp_request *req)
+{
+ return (req->flags & EVHTTP_USER_OWNED) != 0;
+}
+
+struct evhttp_connection *
+evhttp_request_get_connection(struct evhttp_request *req)
+{
+ return req->evcon;
+}
+
+struct event_base *
+evhttp_connection_get_base(struct evhttp_connection *conn)
+{
+ return conn->base;
+}
+
+void
+evhttp_request_set_chunked_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *))
+{
+ req->chunk_cb = cb;
+}
+
+void
+evhttp_request_set_header_cb(struct evhttp_request *req,
+ int (*cb)(struct evhttp_request *, void *))
+{
+ req->header_cb = cb;
+}
+
+void
+evhttp_request_set_error_cb(struct evhttp_request *req,
+ void (*cb)(enum evhttp_request_error, void *))
+{
+ req->error_cb = cb;
+}
+
+void
+evhttp_request_set_on_complete_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *), void *cb_arg)
+{
+ req->on_complete_cb = cb;
+ req->on_complete_cb_arg = cb_arg;
+}
+
+/*
+ * Allows for inspection of the request URI
+ */
+
+const char *
+evhttp_request_get_uri(const struct evhttp_request *req) {
+ if (req->uri == NULL)
+ event_debug(("%s: request %p has no uri\n", __func__, req));
+ return (req->uri);
+}
+
+const struct evhttp_uri *
+evhttp_request_get_evhttp_uri(const struct evhttp_request *req) {
+ if (req->uri_elems == NULL)
+ event_debug(("%s: request %p has no uri elems\n",
+ __func__, req));
+ return (req->uri_elems);
+}
+
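+/*
+ * Returns the host the request is addressed to, preferring the URI's
+ * host component over the Host: header.  A trailing ":port" in the
+ * Host: header is stripped (the stripped copy is cached on the request).
+ */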
+const char *
+evhttp_request_get_host(struct evhttp_request *req)
+{
+ const char *host = NULL;
+
+ if (req->host_cache)
+ return req->host_cache;
+
+ if (req->uri_elems)
+ host = evhttp_uri_get_host(req->uri_elems);
+ if (!host && req->input_headers) {
+ const char *p;
+ size_t len;
+
+ host = evhttp_find_header(req->input_headers, "Host");
+ /* The Host: header may include a port. Remove it here
+ to be consistent with the uri_elems case above. */
+ if (host) {
+ p = host + strlen(host) - 1;
+ while (p > host && EVUTIL_ISDIGIT_(*p))
+ --p;
+ if (p > host && *p == ':') {
+ len = p - host;
+ req->host_cache = mm_malloc(len + 1);
+ if (!req->host_cache) {
+ event_warn("%s: malloc", __func__);
+ return NULL;
+ }
+ memcpy(req->host_cache, host, len);
+ req->host_cache[len] = '\0';
+ host = req->host_cache;
+ }
+ }
+ }
+
+ return host;
+}
+
+enum evhttp_cmd_type
+evhttp_request_get_command(const struct evhttp_request *req) {
+ return (req->type);
+}
+
+int
+evhttp_request_get_response_code(const struct evhttp_request *req)
+{
+ return req->response_code;
+}
+
+const char *
+evhttp_request_get_response_code_line(const struct evhttp_request *req)
+{
+ return req->response_code_line;
+}
+
+/** Returns the input headers */
+struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req)
+{
+ return (req->input_headers);
+}
+
+/** Returns the output headers */
+struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req)
+{
+ return (req->output_headers);
+}
+
+/** Returns the input buffer */
+struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req)
+{
+ return (req->input_buffer);
+}
+
+/** Returns the output buffer */
+struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req)
+{
+ return (req->output_buffer);
+}
+
+
+/*
+ * Takes a file descriptor to read a request from.
+ * The callback is executed once the whole request has been read.
+ */
+
+static struct evhttp_connection*
+evhttp_get_request_connection(
+ struct evhttp* http,
+ evutil_socket_t fd, struct sockaddr *sa, ev_socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+ char *hostname = NULL, *portname = NULL;
+ struct bufferevent* bev = NULL;
+
+ name_from_addr(sa, salen, &hostname, &portname);
+ if (hostname == NULL || portname == NULL) {
+ if (hostname) mm_free(hostname);
+ if (portname) mm_free(portname);
+ return (NULL);
+ }
+
+ event_debug(("%s: new request from %s:%s on "EV_SOCK_FMT"\n",
+ __func__, hostname, portname, EV_SOCK_ARG(fd)));
+
+ /* we need a connection object to put the http request on */
+ if (http->bevcb != NULL) {
+ bev = (*http->bevcb)(http->base, http->bevcbarg);
+ }
+ evcon = evhttp_connection_base_bufferevent_new(
+ http->base, NULL, bev, hostname, atoi(portname));
+ mm_free(hostname);
+ mm_free(portname);
+ if (evcon == NULL)
+ return (NULL);
+
+ evcon->max_headers_size = http->default_max_headers_size;
+ evcon->max_body_size = http->default_max_body_size;
+
+ evcon->flags |= EVHTTP_CON_INCOMING;
+ evcon->state = EVCON_READING_FIRSTLINE;
+
+ evcon->fd = fd;
+
+ bufferevent_enable(evcon->bufev, EV_READ);
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+ bufferevent_setfd(evcon->bufev, fd);
+
+ return (evcon);
+}
+
+static int
+evhttp_associate_new_request_with_connection(struct evhttp_connection *evcon)
+{
+ struct evhttp *http = evcon->http_server;
+ struct evhttp_request *req;
+ if ((req = evhttp_request_new(evhttp_handle_request, http)) == NULL)
+ return (-1);
+
+ if ((req->remote_host = mm_strdup(evcon->address)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ evhttp_request_free(req);
+ return (-1);
+ }
+ req->remote_port = evcon->port;
+
+ req->evcon = evcon; /* the request ends up owning the connection */
+ req->flags |= EVHTTP_REQ_OWN_CONNECTION;
+
+ /* We did not present the request to the user yet, so treat it as
+ * if the user was done with the request. This allows us to free the
+ * request on a persistent connection if the client drops it without
+ * sending a request.
+ */
+ req->userdone = 1;
+
+ TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+ req->kind = EVHTTP_REQUEST;
+
+
+ evhttp_start_read_(evcon);
+
+ return (0);
+}
+
+static void
+evhttp_get_request(struct evhttp *http, evutil_socket_t fd,
+ struct sockaddr *sa, ev_socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+
+ evcon = evhttp_get_request_connection(http, fd, sa, salen);
+ if (evcon == NULL) {
+ event_sock_warn(fd, "%s: cannot get connection on "EV_SOCK_FMT,
+ __func__, EV_SOCK_ARG(fd));
+ evutil_closesocket(fd);
+ return;
+ }
+
+ /* the timeout can be used by the server to close idle connections */
+ if (evutil_timerisset(&http->timeout))
+ evhttp_connection_set_timeout_tv(evcon, &http->timeout);
+
+ /*
+ * if we want to accept more than one request on a connection,
+ * we need to know which http server it belongs to.
+ */
+ evcon->http_server = http;
+ TAILQ_INSERT_TAIL(&http->connections, evcon, next);
+
+ if (evhttp_associate_new_request_with_connection(evcon) == -1)
+ evhttp_connection_free(evcon);
+}
+
+
+/*
+ * Network helper functions that we do not want to export to the rest of
+ * the world.
+ */
+
+static void
+name_from_addr(struct sockaddr *sa, ev_socklen_t salen,
+ char **phost, char **pport)
+{
+ char ntop[NI_MAXHOST];
+ char strport[NI_MAXSERV];
+ int ni_result;
+
+#ifdef EVENT__HAVE_GETNAMEINFO
+ ni_result = getnameinfo(sa, salen,
+ ntop, sizeof(ntop), strport, sizeof(strport),
+ NI_NUMERICHOST|NI_NUMERICSERV);
+
+ if (ni_result != 0) {
+#ifdef EAI_SYSTEM
+ /* Windows doesn't have an EAI_SYSTEM. */
+ if (ni_result == EAI_SYSTEM)
+ event_err(1, "getnameinfo failed");
+ else
+#endif
+ event_errx(1, "getnameinfo failed: %s", gai_strerror(ni_result));
+ return;
+ }
+#else
+ ni_result = fake_getnameinfo(sa, salen,
+ ntop, sizeof(ntop), strport, sizeof(strport),
+ NI_NUMERICHOST|NI_NUMERICSERV);
+ if (ni_result != 0)
+ return;
+#endif
+
+ *phost = mm_strdup(ntop);
+ *pport = mm_strdup(strport);
+}
+
+/* Create a non-blocking socket and bind it */
+/* todo: rename this function */
+static evutil_socket_t
+bind_socket_ai(struct evutil_addrinfo *ai, int reuse)
+{
+ evutil_socket_t fd;
+
+ int on = 1, r;
+ int serrno;
+
+ /* Create listen socket */
+ fd = evutil_socket_(ai ? ai->ai_family : AF_INET,
+ SOCK_STREAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0);
+ if (fd == -1) {
+ event_sock_warn(-1, "socket");
+ return (-1);
+ }
+
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on))<0)
+ goto out;
+ if (reuse) {
+ if (evutil_make_listen_socket_reuseable(fd) < 0)
+ goto out;
+ }
+
+ if (ai != NULL) {
+ r = bind(fd, ai->ai_addr, (ev_socklen_t)ai->ai_addrlen);
+ if (r == -1)
+ goto out;
+ }
+
+ return (fd);
+
+ out:
+ serrno = EVUTIL_SOCKET_ERROR();
+ evutil_closesocket(fd);
+ EVUTIL_SET_SOCKET_ERROR(serrno);
+ return (-1);
+}
+
+static struct evutil_addrinfo *
+make_addrinfo(const char *address, ev_uint16_t port)
+{
+ struct evutil_addrinfo *ai = NULL;
+
+ struct evutil_addrinfo hints;
+ char strport[NI_MAXSERV];
+ int ai_result;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ /* turn NULL hostname into INADDR_ANY, and skip looking up any address
+ * types we don't have an interface to connect to. */
+ hints.ai_flags = EVUTIL_AI_PASSIVE|EVUTIL_AI_ADDRCONFIG;
+ evutil_snprintf(strport, sizeof(strport), "%d", port);
+ if ((ai_result = evutil_getaddrinfo(address, strport, &hints, &ai))
+ != 0) {
+ if (ai_result == EVUTIL_EAI_SYSTEM)
+ event_warn("getaddrinfo");
+ else
+ event_warnx("getaddrinfo: %s",
+ evutil_gai_strerror(ai_result));
+ return (NULL);
+ }
+
+ return (ai);
+}
+
+static evutil_socket_t
+bind_socket(const char *address, ev_uint16_t port, int reuse)
+{
+ evutil_socket_t fd;
+ struct evutil_addrinfo *aitop = NULL;
+
+ /* just create an unbound socket */
+ if (address == NULL && port == 0)
+ return bind_socket_ai(NULL, 0);
+
+ aitop = make_addrinfo(address, port);
+
+ if (aitop == NULL)
+ return (-1);
+
+ fd = bind_socket_ai(aitop, reuse);
+
+ evutil_freeaddrinfo(aitop);
+
+ return (fd);
+}
+
+struct evhttp_uri {
+ unsigned flags;
+ char *scheme; /* scheme, e.g. http, ftp, etc. */
+ char *userinfo; /* userinfo (typically username:pass), or NULL */
+ char *host; /* hostname, IP address, or NULL */
+ int port; /* port, or zero */
+ char *path; /* path, or "". */
+ char *query; /* query, or NULL */
+ char *fragment; /* fragment or NULL */
+};
+
+struct evhttp_uri *
+evhttp_uri_new(void)
+{
+ struct evhttp_uri *uri = mm_calloc(sizeof(struct evhttp_uri), 1);
+ if (uri)
+ uri->port = -1;
+ return uri;
+}
+
+void
+evhttp_uri_set_flags(struct evhttp_uri *uri, unsigned flags)
+{
+ uri->flags = flags;
+}
+
+/* Return true if the string starting at s and ending immediately before eos
+ * is a valid URI scheme according to RFC3986
+ */
+static int
+scheme_ok(const char *s, const char *eos)
+{
+ /* scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) */
+ EVUTIL_ASSERT(eos >= s);
+ if (s == eos)
+ return 0;
+ if (!EVUTIL_ISALPHA_(*s))
+ return 0;
+ while (++s < eos) {
+ if (! EVUTIL_ISALNUM_(*s) &&
+ *s != '+' && *s != '-' && *s != '.')
+ return 0;
+ }
+ return 1;
+}
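+
+/*
+ * Illustrative examples (not part of the original source): under the
+ * production above, "http", "ftp" and "svn+ssh" are accepted, while
+ * "3scheme" (leading digit) and "ht~tp" (disallowed character) are
+ * rejected.
+ */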
+
+#define SUBDELIMS "!$&'()*+,;="
+
+/* Return true iff [s..eos) is a valid userinfo */
+static int
+userinfo_ok(const char *s, const char *eos)
+{
+ while (s < eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s) ||
+ *s == ':')
+ ++s;
+ else if (*s == '%' && s+2 < eos &&
+ EVUTIL_ISXDIGIT_(s[1]) &&
+ EVUTIL_ISXDIGIT_(s[2]))
+ s += 3;
+ else
+ return 0;
+ }
+ return 1;
+}
+
+static int
+regname_ok(const char *s, const char *eos)
+{
+ while (s && s<eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s))
+ ++s;
+ else if (*s == '%' &&
+ EVUTIL_ISXDIGIT_(s[1]) &&
+ EVUTIL_ISXDIGIT_(s[2]))
+ s += 3;
+ else
+ return 0;
+ }
+ return 1;
+}
+
+static int
+parse_port(const char *s, const char *eos)
+{
+ int portnum = 0;
+ while (s < eos) {
+ if (! EVUTIL_ISDIGIT_(*s))
+ return -1;
+ portnum = (portnum * 10) + (*s - '0');
+ if (portnum < 0)
+ return -1;
+ if (portnum > 65535)
+ return -1;
+ ++s;
+ }
+ return portnum;
+}
+
+/* returns 0 for bad, 1 for ipv6, 2 for IPvFuture */
+static int
+bracket_addr_ok(const char *s, const char *eos)
+{
+ if (s + 3 > eos || *s != '[' || *(eos-1) != ']')
+ return 0;
+ if (s[1] == 'v') {
+ /* IPvFuture, or junk.
+ "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
+ */
+ s += 2; /* skip [v */
+ --eos;
+ if (!EVUTIL_ISXDIGIT_(*s)) /*require at least one*/
+ return 0;
+ while (s < eos && *s != '.') {
+ if (EVUTIL_ISXDIGIT_(*s))
+ ++s;
+ else
+ return 0;
+ }
+ if (*s != '.')
+ return 0;
+ ++s;
+ while (s < eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s) ||
+ *s == ':')
+ ++s;
+ else
+ return 0;
+ }
+ return 2;
+ } else {
+ /* IPv6, or junk */
+ char buf[64];
+ ev_ssize_t n_chars = eos-s-2;
+ struct in6_addr in6;
+ if (n_chars >= 64) /* way too long */
+ return 0;
+ memcpy(buf, s+1, n_chars);
+ buf[n_chars]='\0';
+ return (evutil_inet_pton(AF_INET6,buf,&in6)==1) ? 1 : 0;
+ }
+}
+
+static int
+parse_authority(struct evhttp_uri *uri, char *s, char *eos)
+{
+ char *cp, *port;
+ EVUTIL_ASSERT(eos);
+ if (eos == s) {
+ uri->host = mm_strdup("");
+ if (uri->host == NULL) {
+ event_warn("%s: strdup", __func__);
+ return -1;
+ }
+ return 0;
+ }
+
+ /* Optionally, we start with "userinfo@" */
+
+ cp = strchr(s, '@');
+ if (cp && cp < eos) {
+ if (! userinfo_ok(s,cp))
+ return -1;
+ *cp++ = '\0';
+ uri->userinfo = mm_strdup(s);
+ if (uri->userinfo == NULL) {
+ event_warn("%s: strdup", __func__);
+ return -1;
+ }
+ } else {
+ cp = s;
+ }
+ /* Optionally, we end with ":port" */
+ for (port=eos-1; port >= cp && EVUTIL_ISDIGIT_(*port); --port)
+ ;
+ if (port >= cp && *port == ':') {
+ if (port+1 == eos) /* Leave port unspecified; the RFC allows a
+ * nil port */
+ uri->port = -1;
+ else if ((uri->port = parse_port(port+1, eos))<0)
+ return -1;
+ eos = port;
+ }
+ /* Now, cp..eos holds the "host" part, which can be an IPv4Address,
+ * an IP-Literal, or a reg-name */
+ EVUTIL_ASSERT(eos >= cp);
+ if (*cp == '[' && eos >= cp+2 && *(eos-1) == ']') {
+ /* IPv6address, IP-Literal, or junk. */
+ if (! bracket_addr_ok(cp, eos))
+ return -1;
+ } else {
+ /* Make sure the host part is ok. */
+ if (! regname_ok(cp,eos)) /* Match IPv4Address or reg-name */
+ return -1;
+ }
+ uri->host = mm_malloc(eos-cp+1);
+ if (uri->host == NULL) {
+ event_warn("%s: malloc", __func__);
+ return -1;
+ }
+ memcpy(uri->host, cp, eos-cp);
+ uri->host[eos-cp] = '\0';
+ return 0;
+
+}
+
+static char *
+end_of_authority(char *cp)
+{
+ while (*cp) {
+ if (*cp == '?' || *cp == '#' || *cp == '/')
+ return cp;
+ ++cp;
+ }
+ return cp;
+}
+
+enum uri_part {
+ PART_PATH,
+ PART_QUERY,
+ PART_FRAGMENT
+};
+
+/* Return the character after the longest prefix of 'cp' that matches...
+ * *pchar / "/" if allow_qchars is false, or
+ * *(pchar / "/" / "?") if allow_qchars is true.
+ */
+static char *
+end_of_path(char *cp, enum uri_part part, unsigned flags)
+{
+ if (flags & EVHTTP_URI_NONCONFORMANT) {
+ /* If NONCONFORMANT:
+ * Path is everything up to a # or ? or nul.
+ * Query is everything up to a # or nul.
+ * Fragment is everything up to a nul.
+ */
+ switch (part) {
+ case PART_PATH:
+ while (*cp && *cp != '#' && *cp != '?')
+ ++cp;
+ break;
+ case PART_QUERY:
+ while (*cp && *cp != '#')
+ ++cp;
+ break;
+ case PART_FRAGMENT:
+ cp += strlen(cp);
+ break;
+ };
+ return cp;
+ }
+
+ while (*cp) {
+ if (CHAR_IS_UNRESERVED(*cp) ||
+ strchr(SUBDELIMS, *cp) ||
+ *cp == ':' || *cp == '@' || *cp == '/')
+ ++cp;
+ else if (*cp == '%' && EVUTIL_ISXDIGIT_(cp[1]) &&
+ EVUTIL_ISXDIGIT_(cp[2]))
+ cp += 3;
+ else if (*cp == '?' && part != PART_PATH)
+ ++cp;
+ else
+ return cp;
+ }
+ return cp;
+}
+
+static int
+path_matches_noscheme(const char *cp)
+{
+ while (*cp) {
+ if (*cp == ':')
+ return 0;
+ else if (*cp == '/')
+ return 1;
+ ++cp;
+ }
+ return 1;
+}
+
+struct evhttp_uri *
+evhttp_uri_parse(const char *source_uri)
+{
+ return evhttp_uri_parse_with_flags(source_uri, 0);
+}
+
+struct evhttp_uri *
+evhttp_uri_parse_with_flags(const char *source_uri, unsigned flags)
+{
+ char *readbuf = NULL, *readp = NULL, *token = NULL, *query = NULL;
+ char *path = NULL, *fragment = NULL;
+ int got_authority = 0;
+
+ struct evhttp_uri *uri = mm_calloc(1, sizeof(struct evhttp_uri));
+ if (uri == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto err;
+ }
+ uri->port = -1;
+ uri->flags = flags;
+
+ readbuf = mm_strdup(source_uri);
+ if (readbuf == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+
+ readp = readbuf;
+ token = NULL;
+
+ /* We try to follow RFC3986 here as much as we can, and match
+ the productions
+
+ URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
+
+ relative-ref = relative-part [ "?" query ] [ "#" fragment ]
+ */
+
+ /* 1. scheme: */
+ token = strchr(readp, ':');
+ if (token && scheme_ok(readp,token)) {
+ *token = '\0';
+ uri->scheme = mm_strdup(readp);
+ if (uri->scheme == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ readp = token+1; /* eat : */
+ }
+
+ /* 2. Optionally, "//" then an 'authority' part. */
+ if (readp[0]=='/' && readp[1] == '/') {
+ char *authority;
+ readp += 2;
+ authority = readp;
+ path = end_of_authority(readp);
+ if (parse_authority(uri, authority, path) < 0)
+ goto err;
+ readp = path;
+ got_authority = 1;
+ }
+
+ /* 3. Path: path-abempty, path-absolute, path-rootless, or path-empty
+ */
+ path = readp;
+ readp = end_of_path(path, PART_PATH, flags);
+
+ /* Query */
+ if (*readp == '?') {
+ *readp = '\0';
+ ++readp;
+ query = readp;
+ readp = end_of_path(readp, PART_QUERY, flags);
+ }
+ /* fragment */
+ if (*readp == '#') {
+ *readp = '\0';
+ ++readp;
+ fragment = readp;
+ readp = end_of_path(readp, PART_FRAGMENT, flags);
+ }
+ if (*readp != '\0') {
+ goto err;
+ }
+
+ /* These next two cases may be unreachable; I'm leaving them
+ * in to be defensive. */
+ /* If you didn't get an authority, the path can't begin with "//" */
+ if (!got_authority && path[0]=='/' && path[1]=='/')
+ goto err;
+ /* If you did get an authority, the path must begin with "/" or be
+ * empty. */
+ if (got_authority && path[0] != '/' && path[0] != '\0')
+ goto err;
+ /* (End of maybe-unreachable cases) */
+
+ /* If there was no scheme, the first part of the path (if any) must
+ * have no colon in it. */
+ if (! uri->scheme && !path_matches_noscheme(path))
+ goto err;
+
+ EVUTIL_ASSERT(path);
+ uri->path = mm_strdup(path);
+ if (uri->path == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+
+ if (query) {
+ uri->query = mm_strdup(query);
+ if (uri->query == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ }
+ if (fragment) {
+ uri->fragment = mm_strdup(fragment);
+ if (uri->fragment == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ }
+
+ mm_free(readbuf);
+
+ return uri;
+err:
+ if (uri)
+ evhttp_uri_free(uri);
+ if (readbuf)
+ mm_free(readbuf);
+ return NULL;
+}
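+
+/*
+ * Illustrative sketch, not part of the original source: what the parser
+ * above yields for a typical absolute URI.  The literal URI is only an
+ * example; the accessor functions used here are defined below.  Kept under
+ * "#if 0" so it is never compiled.
+ */
+#if 0
+static void
+example_uri_parse(void)
+{
+	struct evhttp_uri *uri =
+	    evhttp_uri_parse("http://user@www.example.com:8080/docs?x=1#top");
+	if (uri) {
+		/* scheme="http", userinfo="user", host="www.example.com",
+		 * port=8080, path="/docs", query="x=1", fragment="top" */
+		printf("%s %d %s\n", evhttp_uri_get_host(uri),
+		    evhttp_uri_get_port(uri), evhttp_uri_get_path(uri));
+		evhttp_uri_free(uri);
+	}
+}
+#endif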
+
+void
+evhttp_uri_free(struct evhttp_uri *uri)
+{
+#define URI_FREE_STR_(f) \
+ if (uri->f) { \
+ mm_free(uri->f); \
+ }
+
+ URI_FREE_STR_(scheme);
+ URI_FREE_STR_(userinfo);
+ URI_FREE_STR_(host);
+ URI_FREE_STR_(path);
+ URI_FREE_STR_(query);
+ URI_FREE_STR_(fragment);
+
+ mm_free(uri);
+#undef URI_FREE_STR_
+}
+
+char *
+evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit)
+{
+ struct evbuffer *tmp = 0;
+ size_t joined_size = 0;
+ char *output = NULL;
+
+#define URI_ADD_(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
+
+ if (!uri || !buf || !limit)
+ return NULL;
+
+ tmp = evbuffer_new();
+ if (!tmp)
+ return NULL;
+
+ if (uri->scheme) {
+ URI_ADD_(scheme);
+ evbuffer_add(tmp, ":", 1);
+ }
+ if (uri->host) {
+ evbuffer_add(tmp, "//", 2);
+ if (uri->userinfo)
+ evbuffer_add_printf(tmp,"%s@", uri->userinfo);
+ URI_ADD_(host);
+ if (uri->port >= 0)
+ evbuffer_add_printf(tmp,":%d", uri->port);
+
+ if (uri->path && uri->path[0] != '/' && uri->path[0] != '\0')
+ goto err;
+ }
+
+ if (uri->path)
+ URI_ADD_(path);
+
+ if (uri->query) {
+ evbuffer_add(tmp, "?", 1);
+ URI_ADD_(query);
+ }
+
+ if (uri->fragment) {
+ evbuffer_add(tmp, "#", 1);
+ URI_ADD_(fragment);
+ }
+
+ evbuffer_add(tmp, "\0", 1); /* NUL */
+
+ joined_size = evbuffer_get_length(tmp);
+
+ if (joined_size > limit) {
+ /* It doesn't fit. */
+ evbuffer_free(tmp);
+ return NULL;
+ }
+ evbuffer_remove(tmp, buf, joined_size);
+
+ output = buf;
+err:
+ evbuffer_free(tmp);
+
+ return output;
+#undef URI_ADD_
+}
+
+const char *
+evhttp_uri_get_scheme(const struct evhttp_uri *uri)
+{
+ return uri->scheme;
+}
+const char *
+evhttp_uri_get_userinfo(const struct evhttp_uri *uri)
+{
+ return uri->userinfo;
+}
+const char *
+evhttp_uri_get_host(const struct evhttp_uri *uri)
+{
+ return uri->host;
+}
+int
+evhttp_uri_get_port(const struct evhttp_uri *uri)
+{
+ return uri->port;
+}
+const char *
+evhttp_uri_get_path(const struct evhttp_uri *uri)
+{
+ return uri->path;
+}
+const char *
+evhttp_uri_get_query(const struct evhttp_uri *uri)
+{
+ return uri->query;
+}
+const char *
+evhttp_uri_get_fragment(const struct evhttp_uri *uri)
+{
+ return uri->fragment;
+}
+
+#define URI_SET_STR_(f) do { \
+ if (uri->f) \
+ mm_free(uri->f); \
+ if (f) { \
+ if ((uri->f = mm_strdup(f)) == NULL) { \
+ event_warn("%s: strdup()", __func__); \
+ return -1; \
+ } \
+ } else { \
+ uri->f = NULL; \
+ } \
+ } while(0)
+
+int
+evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme)
+{
+ if (scheme && !scheme_ok(scheme, scheme+strlen(scheme)))
+ return -1;
+
+ URI_SET_STR_(scheme);
+ return 0;
+}
+int
+evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo)
+{
+ if (userinfo && !userinfo_ok(userinfo, userinfo+strlen(userinfo)))
+ return -1;
+ URI_SET_STR_(userinfo);
+ return 0;
+}
+int
+evhttp_uri_set_host(struct evhttp_uri *uri, const char *host)
+{
+ if (host) {
+ if (host[0] == '[') {
+ if (! bracket_addr_ok(host, host+strlen(host)))
+ return -1;
+ } else {
+ if (! regname_ok(host, host+strlen(host)))
+ return -1;
+ }
+ }
+
+ URI_SET_STR_(host);
+ return 0;
+}
+int
+evhttp_uri_set_port(struct evhttp_uri *uri, int port)
+{
+ if (port < -1)
+ return -1;
+ uri->port = port;
+ return 0;
+}
+#define end_of_cpath(cp,p,f) \
+ ((const char*)(end_of_path(((char*)(cp)), (p), (f))))
+
+int
+evhttp_uri_set_path(struct evhttp_uri *uri, const char *path)
+{
+ if (path && end_of_cpath(path, PART_PATH, uri->flags) != path+strlen(path))
+ return -1;
+
+ URI_SET_STR_(path);
+ return 0;
+}
+int
+evhttp_uri_set_query(struct evhttp_uri *uri, const char *query)
+{
+ if (query && end_of_cpath(query, PART_QUERY, uri->flags) != query+strlen(query))
+ return -1;
+ URI_SET_STR_(query);
+ return 0;
+}
+int
+evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment)
+{
+ if (fragment && end_of_cpath(fragment, PART_FRAGMENT, uri->flags) != fragment+strlen(fragment))
+ return -1;
+ URI_SET_STR_(fragment);
+ return 0;
+}
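+
+/*
+ * Illustrative sketch, not part of the original source: building a URI with
+ * the setters above and serializing it with evhttp_uri_join().  Buffer size
+ * and component values are arbitrary.  Kept under "#if 0" so it is never
+ * compiled.
+ */
+#if 0
+static void
+example_uri_join(void)
+{
+	char buf[256];
+	struct evhttp_uri *uri = evhttp_uri_new();
+	if (!uri)
+		return;
+	evhttp_uri_set_scheme(uri, "https");
+	evhttp_uri_set_host(uri, "example.net");
+	evhttp_uri_set_port(uri, 8443);
+	evhttp_uri_set_path(uri, "/index.html");
+	/* Expected output: "https://example.net:8443/index.html" */
+	if (evhttp_uri_join(uri, buf, sizeof(buf)))
+		puts(buf);
+	evhttp_uri_free(uri);
+}
+#endif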
diff --git a/libs/libevent/src/iocp-internal.h b/libs/libevent/src/iocp-internal.h
new file mode 100644
index 0000000000..93dbe2b1a4
--- /dev/null
+++ b/libs/libevent/src/iocp-internal.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IOCP_INTERNAL_H_INCLUDED_
+#define IOCP_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct event_overlapped;
+struct event_iocp_port;
+struct evbuffer;
+typedef void (*iocp_callback)(struct event_overlapped *, ev_uintptr_t, ev_ssize_t, int success);
+
+/* This whole file is actually win32 only. We wrap the structures in a win32
+ * ifdef so that we can test-compile code that uses these interfaces on
+ * non-win32 platforms. */
+#ifdef _WIN32
+
+/**
+ Internal use only. Wraps an OVERLAPPED that we're using for libevent
+ functionality. Whenever an event_iocp_port gets an event for a given
+ OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the
+ iocp_callback function with the event_overlapped, the iocp key, and the
+ number of bytes transferred as arguments.
+ */
+struct event_overlapped {
+ OVERLAPPED overlapped;
+ iocp_callback cb;
+};
+
+/* Mingw's headers don't define LPFN_ACCEPTEX. */
+
+typedef BOOL (WINAPI *AcceptExPtr)(SOCKET, SOCKET, PVOID, DWORD, DWORD, DWORD, LPDWORD, LPOVERLAPPED);
+typedef BOOL (WINAPI *ConnectExPtr)(SOCKET, const struct sockaddr *, int, PVOID, DWORD, LPDWORD, LPOVERLAPPED);
+typedef void (WINAPI *GetAcceptExSockaddrsPtr)(PVOID, DWORD, DWORD, DWORD, LPSOCKADDR *, LPINT, LPSOCKADDR *, LPINT);
+
+/** Internal use only. Holds pointers to functions that only some versions of
+ Windows provide.
+ */
+struct win32_extension_fns {
+ AcceptExPtr AcceptEx;
+ ConnectExPtr ConnectEx;
+ GetAcceptExSockaddrsPtr GetAcceptExSockaddrs;
+};
+
+/**
+ Internal use only. Stores a Windows IO Completion port, along with
+ related data.
+ */
+struct event_iocp_port {
+ /** The port itself */
+ HANDLE port;
+ /* A lock to cover internal structures. */
+ CRITICAL_SECTION lock;
+ /** Number of threads ever open on the port. */
+ short n_threads;
+ /** True iff we're shutting down all the threads on this port */
+ short shutdown;
+ /** How often the threads on this port check for shutdown and other
+ * conditions */
+ long ms;
+ /* The threads that are waiting for events. */
+ HANDLE *threads;
+ /** Number of threads currently open on this port. */
+ short n_live_threads;
+ /** A semaphore to signal when we are done shutting down. */
+ HANDLE *shutdownSemaphore;
+};
+
+const struct win32_extension_fns *event_get_win32_extension_fns_(void);
+#else
+/* Dummy definition so we can test-compile more things on unix. */
+struct event_overlapped {
+ iocp_callback cb;
+};
+#endif
+
+/** Initialize the fields in an event_overlapped.
+
+ @param overlapped The struct event_overlapped to initialize
+ @param cb The callback that should be invoked once the IO operation has
+ finished.
+ */
+void event_overlapped_init_(struct event_overlapped *, iocp_callback cb);
+
+/** Allocate and return a new evbuffer that supports overlapped IO on a given
+ socket. The socket must be associated with an IO completion port using
+ event_iocp_port_associate_.
+*/
+struct evbuffer *evbuffer_overlapped_new_(evutil_socket_t fd);
+
+/** XXXX Document (nickm) */
+evutil_socket_t evbuffer_overlapped_get_fd_(struct evbuffer *buf);
+
+void evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd);
+
+/** Start reading data onto the end of an overlapped evbuffer.
+
+ An evbuffer can only have one read pending at a time. While the read
+ is in progress, no other data may be added to the end of the buffer.
+ The buffer must be created with evbuffer_overlapped_new_().
+ evbuffer_commit_read_() must be called in the completion callback.
+
+ @param buf The buffer to read onto
+ @param n The number of bytes to try to read.
+ @param ol Overlapped object with associated completion callback.
+ @return 0 on success, -1 on error.
+ */
+int evbuffer_launch_read_(struct evbuffer *buf, size_t n, struct event_overlapped *ol);
+
+/** Start writing data from the start of an evbuffer.
+
+ An evbuffer can only have one write pending at a time. While the write is
+ in progress, no other data may be removed from the front of the buffer.
+ The buffer must be created with evbuffer_overlapped_new_().
+ evbuffer_commit_write_() must be called in the completion callback.
+
+ @param buf The buffer to write from
+ @param n The number of bytes to try to write.
+ @param ol Overlapped object with associated completion callback.
+ @return 0 on success, -1 on error.
+ */
+int evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t n, struct event_overlapped *ol);
+
+/** XXX document */
+void evbuffer_commit_read_(struct evbuffer *, ev_ssize_t);
+void evbuffer_commit_write_(struct evbuffer *, ev_ssize_t);
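+
+/* Illustrative sketch, not part of the original header: the intended calling
+ * sequence for the overlapped-read helpers above.  It assumes the socket has
+ * already been associated with an IOCP via event_iocp_port_associate_();
+ * the struct and function names are hypothetical.  Kept under "#if 0" so it
+ * is never compiled.
+ */
+#if 0
+struct example_reader {
+	struct event_overlapped ol;
+	struct evbuffer *buf;
+};
+
+static void
+example_read_done(struct event_overlapped *o, ev_uintptr_t key,
+    ev_ssize_t nbytes, int ok)
+{
+	/* Recover the enclosing struct, then account for the completed read. */
+	struct example_reader *r = EVUTIL_UPCAST(o, struct example_reader, ol);
+	evbuffer_commit_read_(r->buf, ok ? nbytes : 0);
+}
+
+static int
+example_start_read(struct example_reader *r, evutil_socket_t sock)
+{
+	r->buf = evbuffer_overlapped_new_(sock);
+	if (!r->buf)
+		return -1;
+	event_overlapped_init_(&r->ol, example_read_done);
+	/* Ask for up to 4096 bytes; completion arrives in example_read_done. */
+	return evbuffer_launch_read_(r->buf, 4096, &r->ol);
+}
+#endif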
+
+/** Create an IOCP, and launch its worker threads. Internal use only.
+
+ This interface is unstable, and will change.
+ */
+struct event_iocp_port *event_iocp_port_launch_(int n_cpus);
+
+/** Associate a file descriptor with an iocp, such that overlapped IO on the
+ fd will happen on one of the iocp's worker threads.
+*/
+int event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd,
+ ev_uintptr_t key);
+
+/** Tell all threads serving an iocp to stop. Wait for up to waitMsec for all
+ the threads to finish whatever they're doing. If waitMsec is -1, wait
+ as long as required. If all the threads are done, free the port and return
+ 0. Otherwise, return -1. If you get a -1 return value, it is safe to call
+ this function again.
+*/
+int event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec);
+
+/* FIXME document. */
+int event_iocp_activate_overlapped_(struct event_iocp_port *port,
+ struct event_overlapped *o,
+ ev_uintptr_t key, ev_uint32_t n_bytes);
+
+struct event_base;
+/* FIXME document. */
+struct event_iocp_port *event_base_get_iocp_(struct event_base *base);
+
+/* FIXME document. */
+int event_base_start_iocp_(struct event_base *base, int n_cpus);
+void event_base_stop_iocp_(struct event_base *base);
+
+/* FIXME document. */
+struct bufferevent *bufferevent_async_new_(struct event_base *base,
+ evutil_socket_t fd, int options);
+
+/* FIXME document. */
+void bufferevent_async_set_connected_(struct bufferevent *bev);
+int bufferevent_async_can_connect_(struct bufferevent *bev);
+int bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
+ const struct sockaddr *sa, int socklen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/ipv6-internal.h b/libs/libevent/src/ipv6-internal.h
new file mode 100644
index 0000000000..0c207377b8
--- /dev/null
+++ b/libs/libevent/src/ipv6-internal.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Internal use only: Fake IPv6 structures and values on platforms that
+ * do not have them */
+
+#ifndef IPV6_INTERNAL_H_INCLUDED_
+#define IPV6_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include "event2/util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file ipv6-internal.h
+ *
+ * Replacement types and functions for platforms that don't support ipv6
+ * properly.
+ */
+
+#ifndef EVENT__HAVE_STRUCT_IN6_ADDR
+struct in6_addr {
+ ev_uint8_t s6_addr[16];
+};
+#endif
+
+#ifndef EVENT__HAVE_SA_FAMILY_T
+typedef int sa_family_t;
+#endif
+
+#ifndef EVENT__HAVE_STRUCT_SOCKADDR_IN6
+struct sockaddr_in6 {
+ /* This will fail if we find a struct sockaddr that doesn't have
+ * sa_family as the first element. */
+ sa_family_t sin6_family;
+ ev_uint16_t sin6_port;
+ struct in6_addr sin6_addr;
+};
+#endif
+
+#ifndef AF_INET6
+#define AF_INET6 3333
+#endif
+#ifndef PF_INET6
+#define PF_INET6 AF_INET6
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/listener.c b/libs/libevent/src/listener.c
new file mode 100644
index 0000000000..2af14e3a7b
--- /dev/null
+++ b/libs/libevent/src/listener.c
@@ -0,0 +1,889 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef _WIN32
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <mswsock.h>
+#endif
+#include <errno.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event2/listener.h"
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "evthread-internal.h"
+#ifdef _WIN32
+#include "iocp-internal.h"
+#include "defer-internal.h"
+#include "event-internal.h"
+#endif
+
+struct evconnlistener_ops {
+ int (*enable)(struct evconnlistener *);
+ int (*disable)(struct evconnlistener *);
+ void (*destroy)(struct evconnlistener *);
+ void (*shutdown)(struct evconnlistener *);
+ evutil_socket_t (*getfd)(struct evconnlistener *);
+ struct event_base *(*getbase)(struct evconnlistener *);
+};
+
+struct evconnlistener {
+ const struct evconnlistener_ops *ops;
+ void *lock;
+ evconnlistener_cb cb;
+ evconnlistener_errorcb errorcb;
+ void *user_data;
+ unsigned flags;
+ short refcnt;
+ int accept4_flags;
+ unsigned enabled : 1;
+};
+
+struct evconnlistener_event {
+ struct evconnlistener base;
+ struct event listener;
+};
+
+#ifdef _WIN32
+struct evconnlistener_iocp {
+ struct evconnlistener base;
+ evutil_socket_t fd;
+ struct event_base *event_base;
+ struct event_iocp_port *port;
+ short n_accepting;
+ unsigned shutting_down : 1;
+ unsigned event_added : 1;
+ struct accepting_socket **accepting;
+};
+#endif
+
+#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
+#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
+
+struct evconnlistener *
+evconnlistener_new_async(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd); /* XXXX export this? */
+
+static int event_listener_enable(struct evconnlistener *);
+static int event_listener_disable(struct evconnlistener *);
+static void event_listener_destroy(struct evconnlistener *);
+static evutil_socket_t event_listener_getfd(struct evconnlistener *);
+static struct event_base *event_listener_getbase(struct evconnlistener *);
+
+#if 0
+static void
+listener_incref_and_lock(struct evconnlistener *listener)
+{
+ LOCK(listener);
+ ++listener->refcnt;
+}
+#endif
+
+static int
+listener_decref_and_unlock(struct evconnlistener *listener)
+{
+ int refcnt = --listener->refcnt;
+ if (refcnt == 0) {
+ listener->ops->destroy(listener);
+ UNLOCK(listener);
+ EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(listener);
+ return 1;
+ } else {
+ UNLOCK(listener);
+ return 0;
+ }
+}
+
+static const struct evconnlistener_ops evconnlistener_event_ops = {
+ event_listener_enable,
+ event_listener_disable,
+ event_listener_destroy,
+ NULL, /* shutdown */
+ event_listener_getfd,
+ event_listener_getbase
+};
+
+static void listener_read_cb(evutil_socket_t, short, void *);
+
+struct evconnlistener *
+evconnlistener_new(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd)
+{
+ struct evconnlistener_event *lev;
+
+#ifdef _WIN32
+ if (base && event_base_get_iocp_(base)) {
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+ if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
+ return evconnlistener_new_async(base, cb, ptr, flags,
+ backlog, fd);
+ }
+#endif
+
+ if (backlog > 0) {
+ if (listen(fd, backlog) < 0)
+ return NULL;
+ } else if (backlog < 0) {
+ if (listen(fd, 128) < 0)
+ return NULL;
+ }
+
+ lev = mm_calloc(1, sizeof(struct evconnlistener_event));
+ if (!lev)
+ return NULL;
+
+ lev->base.ops = &evconnlistener_event_ops;
+ lev->base.cb = cb;
+ lev->base.user_data = ptr;
+ lev->base.flags = flags;
+ lev->base.refcnt = 1;
+
+ lev->base.accept4_flags = 0;
+ if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
+ lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
+ if (flags & LEV_OPT_CLOSE_ON_EXEC)
+ lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;
+
+ if (flags & LEV_OPT_THREADSAFE) {
+ EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ }
+
+ event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
+ listener_read_cb, lev);
+
+ if (!(flags & LEV_OPT_DISABLED))
+ evconnlistener_enable(&lev->base);
+
+ return &lev->base;
+}
+
+struct evconnlistener *
+evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
+ void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
+ int socklen)
+{
+ struct evconnlistener *listener;
+ evutil_socket_t fd;
+ int on = 1;
+ int family = sa ? sa->sa_family : AF_UNSPEC;
+ int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;
+
+ if (backlog == 0)
+ return NULL;
+
+ if (flags & LEV_OPT_CLOSE_ON_EXEC)
+ socktype |= EVUTIL_SOCK_CLOEXEC;
+
+ fd = evutil_socket_(family, socktype, 0);
+ if (fd == -1)
+ return NULL;
+
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
+ goto err;
+
+ if (flags & LEV_OPT_REUSEABLE) {
+ if (evutil_make_listen_socket_reuseable(fd) < 0)
+ goto err;
+ }
+
+ if (flags & LEV_OPT_REUSEABLE_PORT) {
+ if (evutil_make_listen_socket_reuseable_port(fd) < 0)
+ goto err;
+ }
+
+ if (flags & LEV_OPT_DEFERRED_ACCEPT) {
+ if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
+ goto err;
+ }
+
+ if (sa) {
+ if (bind(fd, sa, socklen)<0)
+ goto err;
+ }
+
+ listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
+ if (!listener)
+ goto err;
+
+ return listener;
+err:
+ evutil_closesocket(fd);
+ return NULL;
+}
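+
+/*
+ * Illustrative sketch, not part of the original source: typical use of
+ * evconnlistener_new_bind() to accept TCP connections on port 8080.  The
+ * callback body and the port number are arbitrary examples.  Kept under
+ * "#if 0" so it is never compiled.
+ */
+#if 0
+static void
+example_accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
+    struct sockaddr *addr, int socklen, void *ctx)
+{
+	/* 'fd' is already non-blocking unless LEV_OPT_LEAVE_SOCKETS_BLOCKING
+	 * was given; wrap it in a bufferevent, hand it off, etc. */
+}
+
+static struct evconnlistener *
+example_listen(struct event_base *base)
+{
+	struct sockaddr_in sin;
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = htonl(INADDR_ANY);
+	sin.sin_port = htons(8080);
+	/* backlog -1 asks the library to pick a sensible default. */
+	return evconnlistener_new_bind(base, example_accept_cb, NULL,
+	    LEV_OPT_CLOSE_ON_FREE|LEV_OPT_REUSEABLE, -1,
+	    (struct sockaddr *)&sin, sizeof(sin));
+}
+#endif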
+
+void
+evconnlistener_free(struct evconnlistener *lev)
+{
+ LOCK(lev);
+ lev->cb = NULL;
+ lev->errorcb = NULL;
+ if (lev->ops->shutdown)
+ lev->ops->shutdown(lev);
+ listener_decref_and_unlock(lev);
+}
+
+static void
+event_listener_destroy(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+
+ event_del(&lev_e->listener);
+ if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
+ evutil_closesocket(event_get_fd(&lev_e->listener));
+ event_debug_unassign(&lev_e->listener);
+}
+
+int
+evconnlistener_enable(struct evconnlistener *lev)
+{
+ int r;
+ LOCK(lev);
+ lev->enabled = 1;
+ if (lev->cb)
+ r = lev->ops->enable(lev);
+ else
+ r = 0;
+ UNLOCK(lev);
+ return r;
+}
+
+int
+evconnlistener_disable(struct evconnlistener *lev)
+{
+ int r;
+ LOCK(lev);
+ lev->enabled = 0;
+ r = lev->ops->disable(lev);
+ UNLOCK(lev);
+ return r;
+}
+
+static int
+event_listener_enable(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_add(&lev_e->listener, NULL);
+}
+
+static int
+event_listener_disable(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_del(&lev_e->listener);
+}
+
+evutil_socket_t
+evconnlistener_get_fd(struct evconnlistener *lev)
+{
+ evutil_socket_t fd;
+ LOCK(lev);
+ fd = lev->ops->getfd(lev);
+ UNLOCK(lev);
+ return fd;
+}
+
+static evutil_socket_t
+event_listener_getfd(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_get_fd(&lev_e->listener);
+}
+
+struct event_base *
+evconnlistener_get_base(struct evconnlistener *lev)
+{
+ struct event_base *base;
+ LOCK(lev);
+ base = lev->ops->getbase(lev);
+ UNLOCK(lev);
+ return base;
+}
+
+static struct event_base *
+event_listener_getbase(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_get_base(&lev_e->listener);
+}
+
+void
+evconnlistener_set_cb(struct evconnlistener *lev,
+ evconnlistener_cb cb, void *arg)
+{
+ int enable = 0;
+ LOCK(lev);
+ if (lev->enabled && !lev->cb)
+ enable = 1;
+ lev->cb = cb;
+ lev->user_data = arg;
+ if (enable)
+ evconnlistener_enable(lev);
+ UNLOCK(lev);
+}
+
+void
+evconnlistener_set_error_cb(struct evconnlistener *lev,
+ evconnlistener_errorcb errorcb)
+{
+ LOCK(lev);
+ lev->errorcb = errorcb;
+ UNLOCK(lev);
+}
+
+static void
+listener_read_cb(evutil_socket_t fd, short what, void *p)
+{
+ struct evconnlistener *lev = p;
+ int err;
+ evconnlistener_cb cb;
+ evconnlistener_errorcb errorcb;
+ void *user_data;
+ LOCK(lev);
+ while (1) {
+ struct sockaddr_storage ss;
+ ev_socklen_t socklen = sizeof(ss);
+ evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
+ if (new_fd < 0)
+ break;
+ if (socklen == 0) {
+ /* This can happen with some older linux kernels in
+ * response to nmap. */
+ evutil_closesocket(new_fd);
+ continue;
+ }
+
+ if (lev->cb == NULL) {
+ evutil_closesocket(new_fd);
+ UNLOCK(lev);
+ return;
+ }
+ ++lev->refcnt;
+ cb = lev->cb;
+ user_data = lev->user_data;
+ UNLOCK(lev);
+ cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
+ user_data);
+ LOCK(lev);
+ if (lev->refcnt == 1) {
+ int freed = listener_decref_and_unlock(lev);
+ EVUTIL_ASSERT(freed);
+
+ evutil_closesocket(new_fd);
+ return;
+ }
+ --lev->refcnt;
+ }
+ err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
+ UNLOCK(lev);
+ return;
+ }
+ if (lev->errorcb != NULL) {
+ ++lev->refcnt;
+ errorcb = lev->errorcb;
+ user_data = lev->user_data;
+ UNLOCK(lev);
+ errorcb(lev, user_data);
+ LOCK(lev);
+ listener_decref_and_unlock(lev);
+ } else {
+ event_sock_warn(fd, "Error from accept() call");
+ }
+}
+
+#ifdef _WIN32
+struct accepting_socket {
+ CRITICAL_SECTION lock;
+ struct event_overlapped overlapped;
+ SOCKET s;
+ int error;
+ struct event_callback deferred;
+ struct evconnlistener_iocp *lev;
+ ev_uint8_t buflen;
+ ev_uint8_t family;
+ unsigned free_on_cb:1;
+ char addrbuf[1];
+};
+
+static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
+ ev_ssize_t n, int ok);
+static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);
+
+static void
+iocp_listener_event_add(struct evconnlistener_iocp *lev)
+{
+ if (lev->event_added)
+ return;
+
+ lev->event_added = 1;
+ event_base_add_virtual_(lev->event_base);
+}
+
+static void
+iocp_listener_event_del(struct evconnlistener_iocp *lev)
+{
+ if (!lev->event_added)
+ return;
+
+ lev->event_added = 0;
+ event_base_del_virtual_(lev->event_base);
+}
+
+static struct accepting_socket *
+new_accepting_socket(struct evconnlistener_iocp *lev, int family)
+{
+ struct accepting_socket *res;
+ int addrlen;
+ int buflen;
+
+ if (family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return NULL;
+ buflen = (addrlen+16)*2;
+
+ res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
+ if (!res)
+ return NULL;
+
+ event_overlapped_init_(&res->overlapped, accepted_socket_cb);
+ res->s = INVALID_SOCKET;
+ res->lev = lev;
+ res->buflen = buflen;
+ res->family = family;
+
+ event_deferred_cb_init_(&res->deferred,
+ event_base_get_npriorities(lev->event_base) / 2,
+ accepted_socket_invoke_user_cb, res);
+
+ InitializeCriticalSectionAndSpinCount(&res->lock, 1000);
+
+ return res;
+}
+
+static void
+free_and_unlock_accepting_socket(struct accepting_socket *as)
+{
+ /* requires lock. */
+ if (as->s != INVALID_SOCKET)
+ closesocket(as->s);
+
+ LeaveCriticalSection(&as->lock);
+ DeleteCriticalSection(&as->lock);
+ mm_free(as);
+}
+
+static int
+start_accepting(struct accepting_socket *as)
+{
+ /* requires lock */
+ const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
+ DWORD pending = 0;
+ SOCKET s = socket(as->family, SOCK_STREAM, 0);
+ int error = 0;
+
+ if (!as->lev->base.enabled)
+ return 0;
+
+ if (s == INVALID_SOCKET) {
+ error = WSAGetLastError();
+ goto report_err;
+ }
+
+ /* XXXX It turns out we need to do this again later. Does this call
+ * have any effect? */
+ setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&as->lev->fd, sizeof(&as->lev->fd));
+
+ if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
+ evutil_make_socket_nonblocking(s);
+
+ if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
+ closesocket(s);
+ return -1;
+ }
+
+ as->s = s;
+
+ if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
+ as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
+ {
+ /* Immediate success! */
+ accepted_socket_cb(&as->overlapped, 1, 0, 1);
+ } else {
+ error = WSAGetLastError();
+ if (error != ERROR_IO_PENDING) {
+ goto report_err;
+ }
+ }
+
+ return 0;
+
+report_err:
+ as->error = error;
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ return 0;
+}
+
+static void
+stop_accepting(struct accepting_socket *as)
+{
+ /* requires lock. */
+ SOCKET s = as->s;
+ as->s = INVALID_SOCKET;
+ closesocket(s);
+}
+
+static void
+accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
+{
+ struct accepting_socket *as = arg;
+
+ struct sockaddr *sa_local=NULL, *sa_remote=NULL;
+ int socklen_local=0, socklen_remote=0;
+ const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
+ struct evconnlistener *lev = &as->lev->base;
+ evutil_socket_t sock=-1;
+ void *data;
+ evconnlistener_cb cb=NULL;
+ evconnlistener_errorcb errorcb=NULL;
+ int error;
+
+ EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);
+
+ LOCK(lev);
+ EnterCriticalSection(&as->lock);
+ if (as->free_on_cb) {
+ free_and_unlock_accepting_socket(as);
+ listener_decref_and_unlock(lev);
+ return;
+ }
+
+ ++lev->refcnt;
+
+ error = as->error;
+ if (error) {
+ as->error = 0;
+ errorcb = lev->errorcb;
+ } else {
+ ext->GetAcceptExSockaddrs(
+ as->addrbuf, 0, as->buflen/2, as->buflen/2,
+ &sa_local, &socklen_local, &sa_remote,
+ &socklen_remote);
+ sock = as->s;
+ cb = lev->cb;
+ as->s = INVALID_SOCKET;
+
+ /* We need to call this so getsockname, getpeername, and
+ * shutdown work correctly on the accepted socket. */
+ /* XXXX handle error? */
+ setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&as->lev->fd, sizeof(&as->lev->fd));
+ }
+ data = lev->user_data;
+
+ LeaveCriticalSection(&as->lock);
+ UNLOCK(lev);
+
+ if (errorcb) {
+ WSASetLastError(error);
+ errorcb(lev, data);
+ } else if (cb) {
+ cb(lev, sock, sa_remote, socklen_remote, data);
+ }
+
+ LOCK(lev);
+ if (listener_decref_and_unlock(lev))
+ return;
+
+ EnterCriticalSection(&as->lock);
+ start_accepting(as);
+ LeaveCriticalSection(&as->lock);
+}
+
+static void
+accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
+{
+ struct accepting_socket *as =
+ EVUTIL_UPCAST(o, struct accepting_socket, overlapped);
+
+ LOCK(&as->lev->base);
+ EnterCriticalSection(&as->lock);
+ if (ok) {
+ /* XXXX Don't do this if some EV_MT flag is set. */
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ LeaveCriticalSection(&as->lock);
+ } else if (as->free_on_cb) {
+ struct evconnlistener *lev = &as->lev->base;
+ free_and_unlock_accepting_socket(as);
+ listener_decref_and_unlock(lev);
+ return;
+ } else if (as->s == INVALID_SOCKET) {
+ /* This is okay; we were disabled by iocp_listener_disable. */
+ LeaveCriticalSection(&as->lock);
+ } else {
+ /* Some error on accept that we couldn't actually handle. */
+ BOOL ok;
+ DWORD transfer = 0, flags=0;
+ event_sock_warn(as->s, "Unexpected error on AcceptEx");
+ ok = WSAGetOverlappedResult(as->s, &o->overlapped,
+ &transfer, FALSE, &flags);
+ if (ok) {
+ /* well, that was confusing! */
+ as->error = 1;
+ } else {
+ as->error = WSAGetLastError();
+ }
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ LeaveCriticalSection(&as->lock);
+ }
+ UNLOCK(&as->lev->base);
+}
+
+static int
+iocp_listener_enable(struct evconnlistener *lev)
+{
+ int i;
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ LOCK(lev);
+ iocp_listener_event_add(lev_iocp);
+ for (i = 0; i < lev_iocp->n_accepting; ++i) {
+ struct accepting_socket *as = lev_iocp->accepting[i];
+ if (!as)
+ continue;
+ EnterCriticalSection(&as->lock);
+ if (!as->free_on_cb && as->s == INVALID_SOCKET)
+ start_accepting(as);
+ LeaveCriticalSection(&as->lock);
+ }
+ UNLOCK(lev);
+ return 0;
+}
+
+static int
+iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
+{
+ int i;
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ LOCK(lev);
+ iocp_listener_event_del(lev_iocp);
+ for (i = 0; i < lev_iocp->n_accepting; ++i) {
+ struct accepting_socket *as = lev_iocp->accepting[i];
+ if (!as)
+ continue;
+ EnterCriticalSection(&as->lock);
+ if (!as->free_on_cb && as->s != INVALID_SOCKET) {
+ if (shutdown)
+ as->free_on_cb = 1;
+ stop_accepting(as);
+ }
+ LeaveCriticalSection(&as->lock);
+ }
+
+ if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
+ evutil_closesocket(lev_iocp->fd);
+
+ UNLOCK(lev);
+ return 0;
+}
+
+static int
+iocp_listener_disable(struct evconnlistener *lev)
+{
+ return iocp_listener_disable_impl(lev,0);
+}
+
+static void
+iocp_listener_destroy(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ if (! lev_iocp->shutting_down) {
+ lev_iocp->shutting_down = 1;
+ iocp_listener_disable_impl(lev,1);
+ }
+
+}
+
+static evutil_socket_t
+iocp_listener_getfd(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+ return lev_iocp->fd;
+}
+static struct event_base *
+iocp_listener_getbase(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+ return lev_iocp->event_base;
+}
+
+static const struct evconnlistener_ops evconnlistener_iocp_ops = {
+ iocp_listener_enable,
+ iocp_listener_disable,
+ iocp_listener_destroy,
+ iocp_listener_destroy, /* shutdown */
+ iocp_listener_getfd,
+ iocp_listener_getbase
+};
+
+/* XXX define some way to override this. */
+#define N_SOCKETS_PER_LISTENER 4
+
+struct evconnlistener *
+evconnlistener_new_async(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd)
+{
+ struct sockaddr_storage ss;
+ int socklen = sizeof(ss);
+ struct evconnlistener_iocp *lev;
+ int i;
+
+ flags |= LEV_OPT_THREADSAFE;
+
+ if (!base || !event_base_get_iocp_(base))
+ goto err;
+
+ /* XXXX duplicate code */
+ if (backlog > 0) {
+ if (listen(fd, backlog) < 0)
+ goto err;
+ } else if (backlog < 0) {
+ if (listen(fd, 128) < 0)
+ goto err;
+ }
+ if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
+ event_sock_warn(fd, "getsockname");
+ goto err;
+ }
+ lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
+ if (!lev) {
+ event_warn("calloc");
+ goto err;
+ }
+ lev->base.ops = &evconnlistener_iocp_ops;
+ lev->base.cb = cb;
+ lev->base.user_data = ptr;
+ lev->base.flags = flags;
+ lev->base.refcnt = 1;
+ lev->base.enabled = 1;
+
+ lev->port = event_base_get_iocp_(base);
+ lev->fd = fd;
+ lev->event_base = base;
+
+
+ if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
+ goto err_free_lev;
+
+ EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ lev->n_accepting = N_SOCKETS_PER_LISTENER;
+ lev->accepting = mm_calloc(lev->n_accepting,
+ sizeof(struct accepting_socket *));
+ if (!lev->accepting) {
+ event_warn("calloc");
+ goto err_delete_lock;
+ }
+ for (i = 0; i < lev->n_accepting; ++i) {
+ lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
+ if (!lev->accepting[i]) {
+ event_warnx("Couldn't create accepting socket");
+ goto err_free_accepting;
+ }
+ if (cb && start_accepting(lev->accepting[i]) < 0) {
+ event_warnx("Couldn't start accepting on socket");
+ EnterCriticalSection(&lev->accepting[i]->lock);
+ free_and_unlock_accepting_socket(lev->accepting[i]);
+ goto err_free_accepting;
+ }
+ ++lev->base.refcnt;
+ }
+
+ iocp_listener_event_add(lev);
+
+ return &lev->base;
+
+err_free_accepting:
+ mm_free(lev->accepting);
+ /* XXXX free the other elements. */
+err_delete_lock:
+ EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+err_free_lev:
+ mm_free(lev);
+err:
+ /* Don't close the fd; it is the caller's responsibility. */
+ return NULL;
+}
+
+#endif
diff --git a/libs/libevent/src/log-internal.h b/libs/libevent/src/log-internal.h
new file mode 100644
index 0000000000..330478a9ed
--- /dev/null
+++ b/libs/libevent/src/log-internal.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef LOG_INTERNAL_H_INCLUDED_
+#define LOG_INTERNAL_H_INCLUDED_
+
+#include "event2/util.h"
+
+#ifdef __GNUC__
+#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
+#define EV_NORETURN __attribute__((noreturn))
+#else
+#define EV_CHECK_FMT(a,b)
+#define EV_NORETURN
+#endif
+
+#define EVENT_ERR_ABORT_ ((int)0xdeaddead)
+
+#define USE_GLOBAL_FOR_DEBUG_LOGGING
+
+#if !defined(EVENT__DISABLE_DEBUG_MODE) || defined(USE_DEBUG)
+#define EVENT_DEBUG_LOGGING_ENABLED
+#endif
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
+extern ev_uint32_t event_debug_logging_mask_;
+#define event_debug_get_logging_mask_() (event_debug_logging_mask_)
+#else
+ev_uint32_t event_debug_get_logging_mask_(void);
+#endif
+#else
+#define event_debug_get_logging_mask_() (0)
+#endif
+
+void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
+void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(3,4) EV_NORETURN;
+void event_sock_warn(evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
+void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_debugx_(const char *fmt, ...) EV_CHECK_FMT(1,2);
+
+void event_logv_(int severity, const char *errstr, const char *fmt, va_list ap)
+ EV_CHECK_FMT(3,0);
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#define event_debug(x) do { \
+ if (event_debug_get_logging_mask_()) { \
+ event_debugx_ x; \
+ } \
+ } while (0)
+#else
+#define event_debug(x) ((void)0)
+#endif
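+
+/* Usage note (not part of the original header): event_debug() expands its
+ * single argument as a call to event_debugx_(), so call sites pass the whole
+ * printf-style argument list in an extra set of parentheses, e.g.
+ *
+ *	event_debug(("%s: got %d bytes", __func__, (int)n));
+ */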
+
+#undef EV_CHECK_FMT
+
+#endif
diff --git a/libs/libevent/src/log.c b/libs/libevent/src/log.c
new file mode 100644
index 0000000000..e8ae9fdc31
--- /dev/null
+++ b/libs/libevent/src/log.c
@@ -0,0 +1,253 @@
+/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * log.c
+ *
+ * Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
+ *
+ * Copyright (c) 2005-2012 Niels Provos and Nick Mathewson
+ *
+ * Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include "event2/event.h"
+#include "event2/util.h"
+
+#include "log-internal.h"
+
+static void event_log(int severity, const char *msg);
+static void event_exit(int errcode) EV_NORETURN;
+
+static event_fatal_cb fatal_fn = NULL;
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#ifdef USE_DEBUG
+#define DEFAULT_MASK EVENT_DBG_ALL
+#else
+#define DEFAULT_MASK 0
+#endif
+
+#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
+ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
+#else
+static ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
+ev_uint32_t
+event_debug_get_logging_mask_(void)
+{
+ return event_debug_logging_mask_;
+}
+#endif
+#endif /* EVENT_DEBUG_LOGGING_ENABLED */
+
+void
+event_enable_debug_logging(ev_uint32_t which)
+{
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+ event_debug_logging_mask_ = which;
+#endif
+}
+
+void
+event_set_fatal_callback(event_fatal_cb cb)
+{
+ fatal_fn = cb;
+}
+
+static void
+event_exit(int errcode)
+{
+ if (fatal_fn) {
+ fatal_fn(errcode);
+ exit(errcode); /* should never be reached */
+ } else if (errcode == EVENT_ERR_ABORT_)
+ abort();
+ else
+ exit(errcode);
+}
+
+void
+event_err(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, strerror(errno), fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, strerror(errno), fmt, ap);
+ va_end(ap);
+}
+
+void
+event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...)
+{
+ va_list ap;
+ int err = evutil_socket_geterror(sock);
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_sock_warn(evutil_socket_t sock, const char *fmt, ...)
+{
+ va_list ap;
+ int err = evutil_socket_geterror(sock);
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
+ va_end(ap);
+}
+
+void
+event_errx(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, NULL, fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_warnx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_msgx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_MSG, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_debugx_(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_DEBUG, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_logv_(int severity, const char *errstr, const char *fmt, va_list ap)
+{
+ char buf[1024];
+ size_t len;
+
+ if (severity == EVENT_LOG_DEBUG && !event_debug_get_logging_mask_())
+ return;
+
+ if (fmt != NULL)
+ evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
+ else
+ buf[0] = '\0';
+
+ if (errstr) {
+ len = strlen(buf);
+ if (len < sizeof(buf) - 3) {
+ evutil_snprintf(buf + len, sizeof(buf) - len, ": %s", errstr);
+ }
+ }
+
+ event_log(severity, buf);
+}
+
+static event_log_cb log_fn = NULL;
+
+void
+event_set_log_callback(event_log_cb cb)
+{
+ log_fn = cb;
+}
+
+static void
+event_log(int severity, const char *msg)
+{
+ if (log_fn)
+ log_fn(severity, msg);
+ else {
+ const char *severity_str;
+ switch (severity) {
+ case EVENT_LOG_DEBUG:
+ severity_str = "debug";
+ break;
+ case EVENT_LOG_MSG:
+ severity_str = "msg";
+ break;
+ case EVENT_LOG_WARN:
+ severity_str = "warn";
+ break;
+ case EVENT_LOG_ERR:
+ severity_str = "err";
+ break;
+ default:
+ severity_str = "???";
+ break;
+ }
+ (void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
+ }
+}
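For reference, the hooks wired up above (log_fn, fatal_fn, and the debug mask) are reachable through the public API in <event2/event.h>. A minimal usage sketch, assuming a standard libevent 2.1 installation (the app_* names are illustrative):

#include <event2/event.h>
#include <stdio.h>
#include <stdlib.h>

/* Route libevent's log output into the application's logger. */
static void
app_log_cb(int severity, const char *msg)
{
	const char *tag =
	    severity == EVENT_LOG_ERR  ? "error" :
	    severity == EVENT_LOG_WARN ? "warn"  :
	    severity == EVENT_LOG_MSG  ? "info"  : "debug";
	fprintf(stderr, "libevent[%s]: %s\n", tag, msg);
}

/* Called instead of exit()/abort() on unrecoverable internal errors. */
static void
app_fatal_cb(int err)
{
	fprintf(stderr, "libevent fatal error %d; exiting\n", err);
	exit(err);
}

int
main(void)
{
	event_set_log_callback(app_log_cb);
	event_set_fatal_callback(app_fatal_cb);
	/* No effect unless the library was built with debug logging support. */
	event_enable_debug_logging(EVENT_DBG_ALL);

	struct event_base *base = event_base_new();
	if (!base)
		return 1;
	event_base_free(base);
	return 0;
}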
diff --git a/libs/libevent/src/minheap-internal.h b/libs/libevent/src/minheap-internal.h
new file mode 100644
index 0000000000..b3b6f1fd49
--- /dev/null
+++ b/libs/libevent/src/minheap-internal.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MINHEAP_INTERNAL_H_INCLUDED_
+#define MINHEAP_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/util.h"
+#include "util-internal.h"
+#include "mm-internal.h"
+
+typedef struct min_heap
+{
+ struct event** p;
+ unsigned n, a;
+} min_heap_t;
+
+static inline void min_heap_ctor_(min_heap_t* s);
+static inline void min_heap_dtor_(min_heap_t* s);
+static inline void min_heap_elem_init_(struct event* e);
+static inline int min_heap_elt_is_top_(const struct event *e);
+static inline int min_heap_empty_(min_heap_t* s);
+static inline unsigned min_heap_size_(min_heap_t* s);
+static inline struct event* min_heap_top_(min_heap_t* s);
+static inline int min_heap_reserve_(min_heap_t* s, unsigned n);
+static inline int min_heap_push_(min_heap_t* s, struct event* e);
+static inline struct event* min_heap_pop_(min_heap_t* s);
+static inline int min_heap_adjust_(min_heap_t *s, struct event* e);
+static inline int min_heap_erase_(min_heap_t* s, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+
+#define min_heap_elem_greater(a, b) \
+ (evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
+
+void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
+void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); }
+void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
+int min_heap_empty_(min_heap_t* s) { return 0u == s->n; }
+unsigned min_heap_size_(min_heap_t* s) { return s->n; }
+struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; }
+
+int min_heap_push_(min_heap_t* s, struct event* e)
+{
+ if (min_heap_reserve_(s, s->n + 1))
+ return -1;
+ min_heap_shift_up_(s, s->n++, e);
+ return 0;
+}
+
+struct event* min_heap_pop_(min_heap_t* s)
+{
+ if (s->n)
+ {
+ struct event* e = *s->p;
+ min_heap_shift_down_(s, 0u, s->p[--s->n]);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return e;
+ }
+ return 0;
+}
+
+int min_heap_elt_is_top_(const struct event *e)
+{
+ return e->ev_timeout_pos.min_heap_idx == 0;
+}
+
+int min_heap_erase_(min_heap_t* s, struct event* e)
+{
+ if (-1 != e->ev_timeout_pos.min_heap_idx)
+ {
+ struct event *last = s->p[--s->n];
+ unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ /* we replace e with the last element in the heap. We might need to
+ shift it upward if it is less than its parent, or downward if it is
+ greater than one or both its children. Since the children are known
+		   to be no less than the parent, it can't need to shift both up and
+ down. */
+ if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
+ min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
+ else
+ min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return 0;
+ }
+ return -1;
+}
+
+int min_heap_adjust_(min_heap_t *s, struct event *e)
+{
+ if (-1 == e->ev_timeout_pos.min_heap_idx) {
+ return min_heap_push_(s, e);
+ } else {
+ unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ /* The position of e has changed; we shift it up or down
+ * as needed. We can't need to do both. */
+ if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
+ min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
+ else
+ min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
+ return 0;
+ }
+}
+
+int min_heap_reserve_(min_heap_t* s, unsigned n)
+{
+ if (s->a < n)
+ {
+ struct event** p;
+ unsigned a = s->a ? s->a * 2 : 8;
+ if (a < n)
+ a = n;
+ if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
+ return -1;
+ s->p = p;
+ s->a = a;
+ }
+ return 0;
+}
+
+void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned parent = (hole_index - 1) / 2;
+ do
+ {
+ (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ } while (hole_index && min_heap_elem_greater(s->p[parent], e));
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned parent = (hole_index - 1) / 2;
+ while (hole_index && min_heap_elem_greater(s->p[parent], e))
+ {
+ (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ }
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned min_child = 2 * (hole_index + 1);
+ while (min_child <= s->n)
+ {
+ min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+ if (!(min_heap_elem_greater(e, s->p[min_child])))
+ break;
+ (s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = min_child;
+ min_child = 2 * (hole_index + 1);
+ }
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+#endif /* MINHEAP_INTERNAL_H_INCLUDED_ */
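The shift-up/shift-down scheme above is easier to see on a plain array of ints. The following standalone sketch mirrors the same hole-propagation logic; it is illustrative only and not part of libevent:

#include <stdio.h>

#define HEAP_CAP 32

struct int_heap { int v[HEAP_CAP]; unsigned n; };

/* Shift-up: move parents down until x is no smaller than its parent. */
static void
heap_push(struct int_heap *h, int x)	/* caller ensures h->n < HEAP_CAP */
{
	unsigned hole = h->n++;
	while (hole && h->v[(hole - 1) / 2] > x) {
		h->v[hole] = h->v[(hole - 1) / 2];
		hole = (hole - 1) / 2;
	}
	h->v[hole] = x;
}

/* Shift-down: refill the hole at the root with the last element. */
static int
heap_pop(struct int_heap *h)		/* caller ensures h->n > 0 */
{
	int top = h->v[0], last = h->v[--h->n];
	unsigned hole = 0, child = 2;	/* right child of the hole, as above */
	while (child <= h->n) {
		/* step back to the left child when it is the smaller one */
		child -= (child == h->n) || (h->v[child] > h->v[child - 1]);
		if (last <= h->v[child])
			break;
		h->v[hole] = h->v[child];
		hole = child;
		child = 2 * (hole + 1);
	}
	h->v[hole] = last;
	return top;
}

int
main(void)
{
	struct int_heap h = { {0}, 0 };
	heap_push(&h, 3);
	heap_push(&h, 1);
	heap_push(&h, 2);
	while (h.n)
		printf("%d ", heap_pop(&h));	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}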
diff --git a/libs/libevent/src/mm-internal.h b/libs/libevent/src/mm-internal.h
new file mode 100644
index 0000000000..4ba6fce4ad
--- /dev/null
+++ b/libs/libevent/src/mm-internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MM_INTERNAL_H_INCLUDED_
+#define MM_INTERNAL_H_INCLUDED_
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+/* Internal use only: Memory allocation functions. We give them nice short
+ * mm_names for our own use, but make sure that the symbols have longer names
+ * so they don't conflict with other libraries (like, say, libmm). */
+
+/** Allocate uninitialized memory.
+ *
+ * @return On success, return a pointer to sz newly allocated bytes.
+ * On failure, set errno to ENOMEM and return NULL.
+ * If the argument sz is 0, simply return NULL.
+ */
+void *event_mm_malloc_(size_t sz);
+
+/** Allocate memory initialized to zero.
+ *
+ * @return On success, return a pointer to (count * size) newly allocated
+ * bytes, initialized to zero.
+ * On failure, or if the product would result in an integer overflow,
+ * set errno to ENOMEM and return NULL.
+ *     If either argument is 0, simply return NULL.
+ */
+void *event_mm_calloc_(size_t count, size_t size);
+
+/** Duplicate a string.
+ *
+ * @return On success, return a pointer to a newly allocated duplicate
+ * of a string.
+ * Set errno to ENOMEM and return NULL if a memory allocation error
+ * occurs (or would occur) in the process.
+ * If the argument str is NULL, set errno to EINVAL and return NULL.
+ */
+char *event_mm_strdup_(const char *str);
+
+void *event_mm_realloc_(void *p, size_t sz);
+void event_mm_free_(void *p);
+#define mm_malloc(sz) event_mm_malloc_(sz)
+#define mm_calloc(count, size) event_mm_calloc_((count), (size))
+#define mm_strdup(s) event_mm_strdup_(s)
+#define mm_realloc(p, sz) event_mm_realloc_((p), (sz))
+#define mm_free(p) event_mm_free_(p)
+#else
+#define mm_malloc(sz) malloc(sz)
+#define mm_calloc(n, sz) calloc((n), (sz))
+#define mm_strdup(s) strdup(s)
+#define mm_realloc(p, sz) realloc((p), (sz))
+#define mm_free(p) free(p)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
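When memory replacement is enabled, the mm_* wrappers above ultimately dispatch to whatever the application installed with the public event_set_mem_functions() call. A usage sketch, assuming the library was not built with --disable-malloc-replacement (the counting_* helpers are illustrative):

#include <event2/event.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long alloc_calls;

static void *
counting_malloc(size_t sz)
{
	alloc_calls++;
	return malloc(sz);
}

static void *
counting_realloc(void *p, size_t sz)
{
	if (p == NULL)
		alloc_calls++;	/* realloc(NULL, sz) behaves like malloc */
	return realloc(p, sz);
}

static void
counting_free(void *p)
{
	free(p);
}

int
main(void)
{
	/* Must run before libevent makes its first allocation. */
	event_set_mem_functions(counting_malloc, counting_realloc, counting_free);

	struct event_base *base = event_base_new();
	event_base_free(base);

	printf("libevent made %lu allocation calls\n", alloc_calls);
	return 0;
}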
diff --git a/libs/libevent/src/ratelim-internal.h b/libs/libevent/src/ratelim-internal.h
new file mode 100644
index 0000000000..6cc1cdde2c
--- /dev/null
+++ b/libs/libevent/src/ratelim-internal.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef RATELIM_INTERNAL_H_INCLUDED_
+#define RATELIM_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/util.h"
+
+/** A token bucket is an internal structure that tracks how many bytes we are
+ * currently willing to read or write on a given bufferevent or group of
+ * bufferevents */
+struct ev_token_bucket {
+ /** How many bytes are we willing to read or write right now? These
+ * values are signed so that we can do "deficit spending" */
+ ev_ssize_t read_limit, write_limit;
+ /** When was this bucket last updated? Measured in abstract 'ticks'
+ * relative to the token bucket configuration. */
+ ev_uint32_t last_updated;
+};
+
+/** Configuration info for a token bucket or set of token buckets. */
+struct ev_token_bucket_cfg {
+ /** How many bytes are we willing to read on average per tick? */
+ size_t read_rate;
+ /** How many bytes are we willing to read at most in any one tick? */
+ size_t read_maximum;
+ /** How many bytes are we willing to write on average per tick? */
+ size_t write_rate;
+ /** How many bytes are we willing to write at most in any one tick? */
+ size_t write_maximum;
+
+ /* How long is a tick? Note that fractions of a millisecond are
+ * ignored. */
+ struct timeval tick_timeout;
+
+ /* How long is a tick, in milliseconds? Derived from tick_timeout. */
+ unsigned msec_per_tick;
+};
+
+/** The current tick is 'current_tick': add bytes to 'bucket' as specified in
+ * 'cfg'. */
+int ev_token_bucket_update_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick);
+
+/** In which tick does 'tv' fall according to 'cfg'? Note that ticks can
+ * overflow easily; your code needs to handle this. */
+ev_uint32_t ev_token_bucket_get_tick_(const struct timeval *tv,
+ const struct ev_token_bucket_cfg *cfg);
+
+/** Adjust 'bucket' to respect 'cfg', and note that it was last updated in
+ * 'current_tick'. If 'reinitialize' is true, we are changing the
+ * configuration of 'bucket'; otherwise, we are setting it up for the first
+ * time.
+ */
+int ev_token_bucket_init_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick,
+ int reinitialize);
+
+int bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev,
+ int unsuspend);
+
+/** Decrease the read limit of 'b' by 'n' bytes */
+#define ev_token_bucket_decrement_read(b,n) \
+ do { \
+ (b)->read_limit -= (n); \
+ } while (0)
+/** Decrease the write limit of 'b' by 'n' bytes */
+#define ev_token_bucket_decrement_write(b,n) \
+ do { \
+ (b)->write_limit -= (n); \
+ } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
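Applications normally reach this token-bucket machinery through the public rate-limiting API in <event2/bufferevent.h>. A sketch, assuming a standard libevent 2.1 installation (the 64 KiB/s and 16 KiB figures are arbitrary):

#include <event2/event.h>
#include <event2/bufferevent.h>

int
main(void)
{
	struct event_base *base = event_base_new();
	struct bufferevent *bev =
	    bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);

	struct timeval tick = { 0, 100 * 1000 };	/* one tick = 100 ms */
	struct ev_token_bucket_cfg *cfg = ev_token_bucket_cfg_new(
	    6554, 16384,	/* read: ~64 KiB/s on average, 16 KiB burst per tick */
	    6554, 16384,	/* write: same limits */
	    &tick);

	if (!bev || !cfg || bufferevent_set_rate_limit(bev, cfg) != 0) {
		/* handle setup failure */
	}

	/* ... connect bev and run event_base_dispatch(base) here ... */

	if (bev)
		bufferevent_free(bev);
	if (cfg)
		ev_token_bucket_cfg_free(cfg);
	event_base_free(base);
	return 0;
}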
diff --git a/libs/libevent/src/signal.c b/libs/libevent/src/signal.c
new file mode 100644
index 0000000000..3f46295024
--- /dev/null
+++ b/libs/libevent/src/signal.c
@@ -0,0 +1,479 @@
+/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event-internal.h"
+#include "event2/util.h"
+#include "evsignal-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "evthread-internal.h"
+
+/*
+ signal.c
+
+ This is the signal-handling implementation we use for backends that don't
+ have a better way to do signal handling. It uses sigaction() or signal()
+  to set a signal handler, and a socket pair to tell the event base when
+  a signal has been received.
+
+ Note that I said "the event base" : only one event base can be set up to use
+ this at a time. For historical reasons and backward compatibility, if you
+ add an event for a signal to event_base A, then add an event for a signal
+ (any signal!) to event_base B, event_base B will get informed about the
+ signal, but event_base A won't.
+
+ It would be neat to change this behavior in some future version of Libevent.
+ kqueue already does something far more sensible. We can make all backends
+ on Linux do a reasonable thing using signalfd.
+*/
+
+#ifndef _WIN32
+/* Windows wants us to call our signal handlers as __cdecl. Nobody else
+ * expects you to do anything crazy like this. */
+#define __cdecl
+#endif
+
+static int evsig_add(struct event_base *, evutil_socket_t, short, short, void *);
+static int evsig_del(struct event_base *, evutil_socket_t, short, short, void *);
+
+static const struct eventop evsigops = {
+ "signal",
+ NULL,
+ evsig_add,
+ evsig_del,
+ NULL,
+ NULL,
+ 0, 0, 0
+};
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+/* Lock for evsig_base and evsig_base_n_signals_added fields. */
+static void *evsig_base_lock = NULL;
+#endif
+/* The event base that's currently getting informed about signals. */
+static struct event_base *evsig_base = NULL;
+/* A copy of evsig_base->sigev_n_signals_added. */
+static int evsig_base_n_signals_added = 0;
+static evutil_socket_t evsig_base_fd = -1;
+
+static void __cdecl evsig_handler(int sig);
+
+#define EVSIGBASE_LOCK() EVLOCK_LOCK(evsig_base_lock, 0)
+#define EVSIGBASE_UNLOCK() EVLOCK_UNLOCK(evsig_base_lock, 0)
+
+void
+evsig_set_base_(struct event_base *base)
+{
+ EVSIGBASE_LOCK();
+ evsig_base = base;
+ evsig_base_n_signals_added = base->sig.ev_n_signals_added;
+ evsig_base_fd = base->sig.ev_signal_pair[1];
+ EVSIGBASE_UNLOCK();
+}
+
+/* Callback for when the signal handler writes a byte to our signaling socket */
+static void
+evsig_cb(evutil_socket_t fd, short what, void *arg)
+{
+ static char signals[1024];
+ ev_ssize_t n;
+ int i;
+ int ncaught[NSIG];
+ struct event_base *base;
+
+ base = arg;
+
+ memset(&ncaught, 0, sizeof(ncaught));
+
+ while (1) {
+#ifdef _WIN32
+ n = recv(fd, signals, sizeof(signals), 0);
+#else
+ n = read(fd, signals, sizeof(signals));
+#endif
+ if (n == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (! EVUTIL_ERR_RW_RETRIABLE(err))
+ event_sock_err(1, fd, "%s: recv", __func__);
+ break;
+ } else if (n == 0) {
+ /* XXX warn? */
+ break;
+ }
+ for (i = 0; i < n; ++i) {
+ ev_uint8_t sig = signals[i];
+ if (sig < NSIG)
+ ncaught[sig]++;
+ }
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ for (i = 0; i < NSIG; ++i) {
+ if (ncaught[i])
+ evmap_signal_active_(base, i, ncaught[i]);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+int
+evsig_init_(struct event_base *base)
+{
+ /*
+ * Our signal handler is going to write to one end of the socket
+ * pair to wake up our event loop. The event loop then scans for
+ * signals that got delivered.
+ */
+ if (evutil_make_internal_pipe_(base->sig.ev_signal_pair) == -1) {
+#ifdef _WIN32
+ /* Make this nonfatal on win32, where sometimes people
+ have localhost firewalled. */
+ event_sock_warn(-1, "%s: socketpair", __func__);
+#else
+ event_sock_err(1, -1, "%s: socketpair", __func__);
+#endif
+ return -1;
+ }
+
+ if (base->sig.sh_old) {
+ mm_free(base->sig.sh_old);
+ }
+ base->sig.sh_old = NULL;
+ base->sig.sh_old_max = 0;
+
+ event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[0],
+ EV_READ | EV_PERSIST, evsig_cb, base);
+
+ base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&base->sig.ev_signal, 0);
+
+ base->evsigsel = &evsigops;
+
+ return 0;
+}
+
+/* Helper: set the signal handler for evsignal to handler in base, so that
+ * we can restore the original handler when we clear the current one. */
+int
+evsig_set_handler_(struct event_base *base,
+ int evsignal, void (__cdecl *handler)(int))
+{
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction sa;
+#else
+ ev_sighandler_t sh;
+#endif
+ struct evsig_info *sig = &base->sig;
+ void *p;
+
+ /*
+ * resize saved signal handler array up to the highest signal number.
+ * a dynamic array is used to keep footprint on the low side.
+ */
+ if (evsignal >= sig->sh_old_max) {
+ int new_max = evsignal + 1;
+ event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
+ __func__, evsignal, sig->sh_old_max));
+ p = mm_realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
+ if (p == NULL) {
+ event_warn("realloc");
+ return (-1);
+ }
+
+ memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
+ 0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));
+
+ sig->sh_old_max = new_max;
+ sig->sh_old = p;
+ }
+
+ /* allocate space for previous handler out of dynamic array */
+ sig->sh_old[evsignal] = mm_malloc(sizeof *sig->sh_old[evsignal]);
+ if (sig->sh_old[evsignal] == NULL) {
+ event_warn("malloc");
+ return (-1);
+ }
+
+ /* save previous handler and setup new handler */
+#ifdef EVENT__HAVE_SIGACTION
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = handler;
+ sa.sa_flags |= SA_RESTART;
+ sigfillset(&sa.sa_mask);
+
+ if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
+ event_warn("sigaction");
+ mm_free(sig->sh_old[evsignal]);
+ sig->sh_old[evsignal] = NULL;
+ return (-1);
+ }
+#else
+ if ((sh = signal(evsignal, handler)) == SIG_ERR) {
+ event_warn("signal");
+ mm_free(sig->sh_old[evsignal]);
+ sig->sh_old[evsignal] = NULL;
+ return (-1);
+ }
+ *sig->sh_old[evsignal] = sh;
+#endif
+
+ return (0);
+}
+
+static int
+evsig_add(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p)
+{
+ struct evsig_info *sig = &base->sig;
+ (void)p;
+
+ EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);
+
+ /* catch signals if they happen quickly */
+ EVSIGBASE_LOCK();
+ if (evsig_base != base && evsig_base_n_signals_added) {
+ event_warnx("Added a signal to event base %p with signals "
+ "already added to event_base %p. Only one can have "
+ "signals at a time with the %s backend. The base with "
+ "the most recently added signal or the most recent "
+ "event_base_loop() call gets preference; do "
+ "not rely on this behavior in future Libevent versions.",
+ base, evsig_base, base->evsel->name);
+ }
+ evsig_base = base;
+ evsig_base_n_signals_added = ++sig->ev_n_signals_added;
+ evsig_base_fd = base->sig.ev_signal_pair[1];
+ EVSIGBASE_UNLOCK();
+
+ event_debug(("%s: %d: changing signal handler", __func__, (int)evsignal));
+ if (evsig_set_handler_(base, (int)evsignal, evsig_handler) == -1) {
+ goto err;
+ }
+
+ if (!sig->ev_signal_added) {
+ if (event_add_nolock_(&sig->ev_signal, NULL, 0))
+ goto err;
+ sig->ev_signal_added = 1;
+ }
+
+ return (0);
+
+err:
+ EVSIGBASE_LOCK();
+ --evsig_base_n_signals_added;
+ --sig->ev_n_signals_added;
+ EVSIGBASE_UNLOCK();
+ return (-1);
+}
+
+int
+evsig_restore_handler_(struct event_base *base, int evsignal)
+{
+ int ret = 0;
+ struct evsig_info *sig = &base->sig;
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction *sh;
+#else
+ ev_sighandler_t *sh;
+#endif
+
+ if (evsignal >= sig->sh_old_max) {
+ /* Can't actually restore. */
+ /* XXXX.*/
+ return 0;
+ }
+
+ /* restore previous handler */
+ sh = sig->sh_old[evsignal];
+ sig->sh_old[evsignal] = NULL;
+#ifdef EVENT__HAVE_SIGACTION
+ if (sigaction(evsignal, sh, NULL) == -1) {
+ event_warn("sigaction");
+ ret = -1;
+ }
+#else
+ if (signal(evsignal, *sh) == SIG_ERR) {
+ event_warn("signal");
+ ret = -1;
+ }
+#endif
+
+ mm_free(sh);
+
+ return ret;
+}
+
+static int
+evsig_del(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p)
+{
+ EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);
+
+ event_debug(("%s: "EV_SOCK_FMT": restoring signal handler",
+ __func__, EV_SOCK_ARG(evsignal)));
+
+ EVSIGBASE_LOCK();
+ --evsig_base_n_signals_added;
+ --base->sig.ev_n_signals_added;
+ EVSIGBASE_UNLOCK();
+
+ return (evsig_restore_handler_(base, (int)evsignal));
+}
+
+static void __cdecl
+evsig_handler(int sig)
+{
+ int save_errno = errno;
+#ifdef _WIN32
+ int socket_errno = EVUTIL_SOCKET_ERROR();
+#endif
+ ev_uint8_t msg;
+
+ if (evsig_base == NULL) {
+ event_warnx(
+ "%s: received signal %d, but have no base configured",
+ __func__, sig);
+ return;
+ }
+
+#ifndef EVENT__HAVE_SIGACTION
+ signal(sig, evsig_handler);
+#endif
+
+ /* Wake up our notification mechanism */
+ msg = sig;
+#ifdef _WIN32
+ send(evsig_base_fd, (char*)&msg, 1, 0);
+#else
+ {
+ int r = write(evsig_base_fd, (char*)&msg, 1);
+ (void)r; /* Suppress 'unused return value' and 'unused var' */
+ }
+#endif
+ errno = save_errno;
+#ifdef _WIN32
+ EVUTIL_SET_SOCKET_ERROR(socket_errno);
+#endif
+}
+
+void
+evsig_dealloc_(struct event_base *base)
+{
+ int i = 0;
+ if (base->sig.ev_signal_added) {
+ event_del(&base->sig.ev_signal);
+ base->sig.ev_signal_added = 0;
+ }
+ /* debug event is created in evsig_init_/event_assign even when
+ * ev_signal_added == 0, so unassign is required */
+ event_debug_unassign(&base->sig.ev_signal);
+
+ for (i = 0; i < NSIG; ++i) {
+ if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
+ evsig_restore_handler_(base, i);
+ }
+ EVSIGBASE_LOCK();
+ if (base == evsig_base) {
+ evsig_base = NULL;
+ evsig_base_n_signals_added = 0;
+ evsig_base_fd = -1;
+ }
+ EVSIGBASE_UNLOCK();
+
+ if (base->sig.ev_signal_pair[0] != -1) {
+ evutil_closesocket(base->sig.ev_signal_pair[0]);
+ base->sig.ev_signal_pair[0] = -1;
+ }
+ if (base->sig.ev_signal_pair[1] != -1) {
+ evutil_closesocket(base->sig.ev_signal_pair[1]);
+ base->sig.ev_signal_pair[1] = -1;
+ }
+ base->sig.sh_old_max = 0;
+
+ /* per index frees are handled in evsig_del() */
+ if (base->sig.sh_old) {
+ mm_free(base->sig.sh_old);
+ base->sig.sh_old = NULL;
+ }
+}
+
+static void
+evsig_free_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (evsig_base_lock != NULL) {
+ EVTHREAD_FREE_LOCK(evsig_base_lock, 0);
+ evsig_base_lock = NULL;
+ }
+#endif
+ return;
+}
+
+void
+evsig_free_globals_(void)
+{
+ evsig_free_globals_locks();
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evsig_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0);
+ return 0;
+}
+
+#endif
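From the application side, this backend is driven through ordinary signal events. A minimal sketch using the public evsignal_new() helper from <event2/event.h>:

#include <event2/event.h>
#include <signal.h>
#include <stdio.h>

/* Invoked from the event loop, not from signal context. */
static void
on_sigint(evutil_socket_t fd, short events, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)events;
	printf("caught SIGINT, leaving the loop\n");
	event_base_loopbreak(base);
}

int
main(void)
{
	struct event_base *base = event_base_new();
	struct event *sigev = evsignal_new(base, SIGINT, on_sigint, base);

	if (!sigev || event_add(sigev, NULL) < 0)
		return 1;

	event_base_dispatch(base);	/* runs until SIGINT arrives */

	event_free(sigev);
	event_base_free(base);
	return 0;
}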
diff --git a/libs/libevent/src/strlcpy-internal.h b/libs/libevent/src/strlcpy-internal.h
new file mode 100644
index 0000000000..cfc27ec662
--- /dev/null
+++ b/libs/libevent/src/strlcpy-internal.h
@@ -0,0 +1,22 @@
+#ifndef STRLCPY_INTERNAL_H_INCLUDED_
+#define STRLCPY_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifndef EVENT__HAVE_STRLCPY
+#include <string.h>
+size_t event_strlcpy_(char *dst, const char *src, size_t siz);
+#define strlcpy event_strlcpy_
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libs/libevent/src/strlcpy.c b/libs/libevent/src/strlcpy.c
new file mode 100644
index 0000000000..3876475f5a
--- /dev/null
+++ b/libs/libevent/src/strlcpy.c
@@ -0,0 +1,75 @@
+/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */
+
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
+#endif /* LIBC_SCCS and not lint */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifndef EVENT__HAVE_STRLCPY
+#include "strlcpy-internal.h"
+
+/*
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
+ * Returns strlen(src); if retval >= siz, truncation occurred.
+ */
+size_t
+event_strlcpy_(char *dst, const char *src, size_t siz)
+{
+ register char *d = dst;
+ register const char *s = src;
+ register size_t n = siz;
+
+ /* Copy as many bytes as will fit */
+ if (n != 0 && --n != 0) {
+ do {
+ if ((*d++ = *s++) == 0)
+ break;
+ } while (--n != 0);
+ }
+
+ /* Not enough room in dst, add NUL and traverse rest of src */
+ if (n == 0) {
+ if (siz != 0)
+ *d = '\0'; /* NUL-terminate dst */
+ while (*s++)
+ ;
+ }
+
+ return (s - src - 1); /* count does not include NUL */
+}
+#endif
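Callers use the return value to detect truncation, exactly as with the native BSD strlcpy(). A sketch, assuming it is compiled inside the libevent source tree so that strlcpy-internal.h resolves:

#include <stdio.h>
#include <string.h>
#include "strlcpy-internal.h"	/* maps strlcpy to event_strlcpy_ when missing */

int
main(void)
{
	char small[8];
	size_t need = strlcpy(small, "this string is too long", sizeof(small));

	if (need >= sizeof(small))
		printf("truncated: source needed %zu bytes, kept \"%s\"\n",
		    need + 1, small);
	return 0;
}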
diff --git a/libs/libevent/src/time-internal.h b/libs/libevent/src/time-internal.h
new file mode 100644
index 0000000000..2c584fa752
--- /dev/null
+++ b/libs/libevent/src/time-internal.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TIME_INTERNAL_H_INCLUDED_
+#define TIME_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_MACH_MACH_TIME_H
+/* For mach_timebase_info */
+#include <mach/mach_time.h>
+#endif
+
+#include <time.h>
+
+#include "event2/util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+#define HAVE_POSIX_MONOTONIC
+#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
+#define HAVE_MACH_MONOTONIC
+#elif defined(_WIN32)
+#define HAVE_WIN32_MONOTONIC
+#else
+#define HAVE_FALLBACK_MONOTONIC
+#endif
+
+long evutil_tv_to_msec_(const struct timeval *tv);
+void evutil_usleep_(const struct timeval *tv);
+
+#ifdef _WIN32
+typedef ULONGLONG (WINAPI *ev_GetTickCount_func)(void);
+#endif
+
+struct evutil_monotonic_timer {
+
+#ifdef HAVE_MACH_MONOTONIC
+ struct mach_timebase_info mach_timebase_units;
+#endif
+
+#ifdef HAVE_POSIX_MONOTONIC
+ int monotonic_clock;
+#endif
+
+#ifdef HAVE_WIN32_MONOTONIC
+ ev_GetTickCount_func GetTickCount64_fn;
+ ev_GetTickCount_func GetTickCount_fn;
+ ev_uint64_t last_tick_count;
+ ev_uint64_t adjust_tick_count;
+
+ ev_uint64_t first_tick;
+ ev_uint64_t first_counter;
+ double usec_per_count;
+ int use_performance_counter;
+#endif
+
+ struct timeval adjust_monotonic_clock;
+ struct timeval last_time;
+};
+
+int evutil_configure_monotonic_time_(struct evutil_monotonic_timer *mt,
+ int flags);
+int evutil_gettime_monotonic_(struct evutil_monotonic_timer *mt, struct timeval *tv);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TIME_INTERNAL_H_INCLUDED_ */
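The monotonic clock selected above is also exposed publicly (since libevent 2.1) through the evutil_monotonic_timer functions in <event2/util.h>. A usage sketch, assuming that API is available:

#include <event2/util.h>
#include <stdio.h>

int
main(void)
{
	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
	struct timeval t1, t2, elapsed;

	if (!timer || evutil_configure_monotonic_time(timer, 0) < 0)
		return 1;

	evutil_gettime_monotonic(timer, &t1);
	/* ... do some work here ... */
	evutil_gettime_monotonic(timer, &t2);

	evutil_timersub(&t2, &t1, &elapsed);
	printf("elapsed: %ld.%06ld s\n",
	    (long)elapsed.tv_sec, (long)elapsed.tv_usec);

	evutil_monotonic_timer_free(timer);
	return 0;
}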
diff --git a/libs/libevent/src/util-internal.h b/libs/libevent/src/util-internal.h
new file mode 100644
index 0000000000..a6318f2890
--- /dev/null
+++ b/libs/libevent/src/util-internal.h
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UTIL_INTERNAL_H_INCLUDED_
+#define UTIL_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <errno.h>
+
+/* For EVUTIL_ASSERT */
+#include "log-internal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_SYS_EVENTFD_H
+#include <sys/eventfd.h>
+#endif
+#include "event2/util.h"
+
+#include "time-internal.h"
+#include "ipv6-internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* If we need magic to say "inline", get it for free internally. */
+#ifdef EVENT__inline
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+#define inline __inline
+#else
+#define inline EVENT__inline
+#endif
+
+
+#endif
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+/* A good no-op to use in macro definitions. */
+#define EVUTIL_NIL_STMT_ ((void)0)
+/* A no-op that tricks the compiler into thinking a condition is used while
+ * definitely not making any code for it. Used to compile out asserts while
+ * avoiding "unused variable" warnings. The "!" forces the compiler to
+ * do the sizeof() on an int, in case "condition" is a bitfield value.
+ */
+#define EVUTIL_NIL_CONDITION_(condition) do { \
+ (void)sizeof(!(condition)); \
+} while(0)
+
+/* Internal use only: macros to match patterns of error codes in a
+ cross-platform way. We need these macros because of two historical
+ reasons: first, nonblocking IO functions are generally written to give an
+ error on the "blocked now, try later" case, so sometimes an error from a
+ read, write, connect, or accept means "no error; just wait for more
+ data," and we need to look at the error code. Second, Windows defines
+ a different set of error codes for sockets. */
+
+#ifndef _WIN32
+
+#if EAGAIN == EWOULDBLOCK
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == EAGAIN)
+#else
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == EAGAIN || (e) == EWOULDBLOCK)
+#endif
+
+/* True iff e is an error that means a read/write operation can be retried. */
+#define EVUTIL_ERR_RW_RETRIABLE(e) \
+ ((e) == EINTR || EVUTIL_ERR_IS_EAGAIN(e))
+/* True iff e is an error that means a connect can be retried. */
+#define EVUTIL_ERR_CONNECT_RETRIABLE(e) \
+ ((e) == EINTR || (e) == EINPROGRESS)
+/* True iff e is an error that means an accept can be retried. */
+#define EVUTIL_ERR_ACCEPT_RETRIABLE(e) \
+ ((e) == EINTR || EVUTIL_ERR_IS_EAGAIN(e) || (e) == ECONNABORTED)
+
+/* True iff e is an error that means the connection was refused */
+#define EVUTIL_ERR_CONNECT_REFUSED(e) \
+ ((e) == ECONNREFUSED)
+
+#else
+/* Win32 */
+
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == WSAEWOULDBLOCK || (e) == EAGAIN)
+
+#define EVUTIL_ERR_RW_RETRIABLE(e) \
+ ((e) == WSAEWOULDBLOCK || \
+ (e) == WSAEINTR)
+
+#define EVUTIL_ERR_CONNECT_RETRIABLE(e) \
+ ((e) == WSAEWOULDBLOCK || \
+ (e) == WSAEINTR || \
+ (e) == WSAEINPROGRESS || \
+ (e) == WSAEINVAL)
+
+#define EVUTIL_ERR_ACCEPT_RETRIABLE(e) \
+ EVUTIL_ERR_RW_RETRIABLE(e)
+
+#define EVUTIL_ERR_CONNECT_REFUSED(e) \
+ ((e) == WSAECONNREFUSED)
+
+#endif
+
+/* Arguments for shutdown() */
+#ifdef SHUT_RD
+#define EVUTIL_SHUT_RD SHUT_RD
+#else
+#define EVUTIL_SHUT_RD 0
+#endif
+#ifdef SHUT_WR
+#define EVUTIL_SHUT_WR SHUT_WR
+#else
+#define EVUTIL_SHUT_WR 1
+#endif
+#ifdef SHUT_BOTH
+#define EVUTIL_SHUT_BOTH SHUT_BOTH
+#else
+#define EVUTIL_SHUT_BOTH 2
+#endif
+
+/* Helper: Verify that all the elements in 'dlist' are internally consistent.
+ * Checks for circular lists and bad prev/next pointers.
+ *
+ * Example usage:
+ * EVUTIL_ASSERT_LIST_OK(eventlist, event, ev_next);
+ */
+#define EVUTIL_ASSERT_LIST_OK(dlist, type, field) do { \
+ struct type *elm1, *elm2, **nextp; \
+ if (LIST_EMPTY((dlist))) \
+ break; \
+ \
+ /* Check list for circularity using Floyd's */ \
+ /* 'Tortoise and Hare' algorithm */ \
+ elm1 = LIST_FIRST((dlist)); \
+ elm2 = LIST_NEXT(elm1, field); \
+ while (elm1 && elm2) { \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm1 = LIST_NEXT(elm1, field); \
+ elm2 = LIST_NEXT(elm2, field); \
+ if (!elm2) \
+ break; \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm2 = LIST_NEXT(elm2, field); \
+ } \
+ \
+ /* Now check next and prev pointers for consistency. */ \
+ nextp = &LIST_FIRST((dlist)); \
+ elm1 = LIST_FIRST((dlist)); \
+ while (elm1) { \
+ EVUTIL_ASSERT(*nextp == elm1); \
+ EVUTIL_ASSERT(nextp == elm1->field.le_prev); \
+ nextp = &LIST_NEXT(elm1, field); \
+ elm1 = *nextp; \
+ } \
+ } while (0)
+
+/* Helper: Verify that all the elements in a TAILQ are internally consistent.
+ * Checks for circular lists and bad prev/next pointers.
+ *
+ * Example usage:
+ * EVUTIL_ASSERT_TAILQ_OK(activelist, event, ev_active_next);
+ */
+#define EVUTIL_ASSERT_TAILQ_OK(tailq, type, field) do { \
+ struct type *elm1, *elm2, **nextp; \
+ if (TAILQ_EMPTY((tailq))) \
+ break; \
+ \
+ /* Check list for circularity using Floyd's */ \
+ /* 'Tortoise and Hare' algorithm */ \
+ elm1 = TAILQ_FIRST((tailq)); \
+ elm2 = TAILQ_NEXT(elm1, field); \
+ while (elm1 && elm2) { \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm1 = TAILQ_NEXT(elm1, field); \
+ elm2 = TAILQ_NEXT(elm2, field); \
+ if (!elm2) \
+ break; \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm2 = TAILQ_NEXT(elm2, field); \
+ } \
+ \
+ /* Now check next and prev pointers for consistency. */ \
+ nextp = &TAILQ_FIRST((tailq)); \
+ elm1 = TAILQ_FIRST((tailq)); \
+ while (elm1) { \
+ EVUTIL_ASSERT(*nextp == elm1); \
+ EVUTIL_ASSERT(nextp == elm1->field.tqe_prev); \
+ nextp = &TAILQ_NEXT(elm1, field); \
+ elm1 = *nextp; \
+ } \
+ EVUTIL_ASSERT(nextp == (tailq)->tqh_last); \
+ } while (0)
+
+/* Locale-independent replacements for some ctypes functions. Use these
+ * when you care about ASCII's notion of character types, because you are about
+ * to send those types onto the wire.
+ */
+int EVUTIL_ISALPHA_(char c);
+int EVUTIL_ISALNUM_(char c);
+int EVUTIL_ISSPACE_(char c);
+int EVUTIL_ISDIGIT_(char c);
+int EVUTIL_ISXDIGIT_(char c);
+int EVUTIL_ISPRINT_(char c);
+int EVUTIL_ISLOWER_(char c);
+int EVUTIL_ISUPPER_(char c);
+char EVUTIL_TOUPPER_(char c);
+char EVUTIL_TOLOWER_(char c);
+
+/** Remove all trailing horizontal whitespace (space or tab) from the end of a
+ * string */
+void evutil_rtrim_lws_(char *);
+
+
+/** Helper macro. If we know that a given pointer points to a field in a
+ structure, return a pointer to the structure itself. Used to implement
+ our half-baked C OO. Example:
+
+ struct subtype {
+ int x;
+ struct supertype common;
+ int y;
+ };
+ ...
+ void fn(struct supertype *super) {
+ struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
+ ...
+ }
+ */
+#define EVUTIL_UPCAST(ptr, type, field) \
+ ((type *)(((char*)(ptr)) - evutil_offsetof(type, field)))
+
+/* As open(pathname, flags, mode), except that the file is always opened with
+ * the close-on-exec flag set. (And the mode argument is mandatory.)
+ */
+int evutil_open_closeonexec_(const char *pathname, int flags, unsigned mode);
+
+int evutil_read_file_(const char *filename, char **content_out, size_t *len_out,
+ int is_binary);
+
+int evutil_socket_connect_(evutil_socket_t *fd_ptr, const struct sockaddr *sa, int socklen);
+
+int evutil_socket_finished_connecting_(evutil_socket_t fd);
+
+int evutil_ersatz_socketpair_(int, int , int, evutil_socket_t[]);
+
+int evutil_resolve_(int family, const char *hostname, struct sockaddr *sa,
+ ev_socklen_t *socklen, int port);
+
+const char *evutil_getenv_(const char *name);
+
+/* Structure to hold the state of our weak random number generator.
+ */
+struct evutil_weakrand_state {
+ ev_uint32_t seed;
+};
+
+#define EVUTIL_WEAKRAND_MAX EV_INT32_MAX
+
+/* Initialize the state of a weak random number generator based on 'seed'. If
+ * the seed is 0, construct a new seed based on not-very-strong platform
+ * entropy, like the PID and the time of day.
+ *
+ * This function, and the other evutil_weakrand* functions, are meant for
+ * speed, not security or statistical strength. If you need a RNG which an
+ * attacker can't predict, or which passes strong statistical tests, use the
+ * evutil_secure_rng* functions instead.
+ */
+ev_uint32_t evutil_weakrand_seed_(struct evutil_weakrand_state *state, ev_uint32_t seed);
+/* Return a pseudorandom value between 0 and EVUTIL_WEAKRAND_MAX inclusive.
+ * Updates the state in 'seed' as needed -- this value must be protected by a
+ * lock.
+ */
+ev_int32_t evutil_weakrand_(struct evutil_weakrand_state *seed);
+/* Return a pseudorandom value x such that 0 <= x < top. top must be no more
+ * than EVUTIL_WEAKRAND_MAX. Updates the state in 'seed' as needed -- this
+ * value must be protected by a lock */
+ev_int32_t evutil_weakrand_range_(struct evutil_weakrand_state *seed, ev_int32_t top);
+
+/* Evaluates to the same boolean value as 'p', and hints to the compiler that
+ * we expect this value to be false. */
+#if defined(__GNUC__) && __GNUC__ >= 3 /* gcc 3.0 or later */
+#define EVUTIL_UNLIKELY(p) __builtin_expect(!!(p),0)
+#else
+#define EVUTIL_UNLIKELY(p) (p)
+#endif
+
+/* Replacement for assert() that calls event_errx on failure. */
+#ifdef NDEBUG
+#define EVUTIL_ASSERT(cond) EVUTIL_NIL_CONDITION_(cond)
+#define EVUTIL_FAILURE_CHECK(cond) 0
+#else
+#define EVUTIL_ASSERT(cond) \
+ do { \
+ if (EVUTIL_UNLIKELY(!(cond))) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s:%d: Assertion %s failed in %s", \
+ __FILE__,__LINE__,#cond,__func__); \
+ /* In case a user-supplied handler tries to */ \
+ /* return control to us, log and abort here. */ \
+ (void)fprintf(stderr, \
+ "%s:%d: Assertion %s failed in %s", \
+ __FILE__,__LINE__,#cond,__func__); \
+ abort(); \
+ } \
+ } while (0)
+#define EVUTIL_FAILURE_CHECK(cond) EVUTIL_UNLIKELY(cond)
+#endif
+
+#ifndef EVENT__HAVE_STRUCT_SOCKADDR_STORAGE
+/* Replacement for sockaddr storage that we can use internally on platforms
+ * that lack it. It is not space-efficient, but neither is sockaddr_storage.
+ */
+struct sockaddr_storage {
+ union {
+ struct sockaddr ss_sa;
+ struct sockaddr_in ss_sin;
+ struct sockaddr_in6 ss_sin6;
+ char ss_padding[128];
+ } ss_union;
+};
+#define ss_family ss_union.ss_sa.sa_family
+#endif
+
+/* Internal addrinfo error code. This one is returned only from
+ * evutil_getaddrinfo_common_, when we are sure that we'll have to hit a DNS
+ * server. */
+#define EVUTIL_EAI_NEED_RESOLVE -90002
+
+struct evdns_base;
+struct evdns_getaddrinfo_request;
+typedef struct evdns_getaddrinfo_request* (*evdns_getaddrinfo_fn)(
+ struct evdns_base *base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg);
+
+void evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo_fn fn);
+
+struct evutil_addrinfo *evutil_new_addrinfo_(struct sockaddr *sa,
+ ev_socklen_t socklen, const struct evutil_addrinfo *hints);
+struct evutil_addrinfo *evutil_addrinfo_append_(struct evutil_addrinfo *first,
+ struct evutil_addrinfo *append);
+void evutil_adjust_hints_for_addrconfig_(struct evutil_addrinfo *hints);
+int evutil_getaddrinfo_common_(const char *nodename, const char *servname,
+ struct evutil_addrinfo *hints, struct evutil_addrinfo **res, int *portnum);
+
+int evutil_getaddrinfo_async_(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg);
+
+/** Return true iff sa is a loopback address. (That is, it is 127.0.0.1/8, or
+ * ::1). */
+int evutil_sockaddr_is_loopback_(const struct sockaddr *sa);
+
+
+/**
+ Formats a sockaddr sa into a string buffer of size outlen stored in out.
+ Returns a pointer to out. Always writes something into out, so it's safe
+ to use the output of this function without checking it for NULL.
+ */
+const char *evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen);
+
+int evutil_hex_char_to_int_(char c);
+
+
+void evutil_free_secure_rng_globals_(void);
+void evutil_free_globals_(void);
+
+#ifdef _WIN32
+HMODULE evutil_load_windows_system_library_(const TCHAR *library_name);
+#endif
+
+#ifndef EV_SIZE_FMT
+#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
+#define EV_U64_FMT "%I64u"
+#define EV_I64_FMT "%I64d"
+#define EV_I64_ARG(x) ((__int64)(x))
+#define EV_U64_ARG(x) ((unsigned __int64)(x))
+#else
+#define EV_U64_FMT "%llu"
+#define EV_I64_FMT "%lld"
+#define EV_I64_ARG(x) ((long long)(x))
+#define EV_U64_ARG(x) ((unsigned long long)(x))
+#endif
+#endif
+
+#ifdef _WIN32
+#define EV_SOCK_FMT EV_I64_FMT
+#define EV_SOCK_ARG(x) EV_I64_ARG((x))
+#else
+#define EV_SOCK_FMT "%d"
+#define EV_SOCK_ARG(x) (x)
+#endif
+
+#if defined(__STDC__) && defined(__STDC_VERSION__) && !defined(__MINGW64_VERSION_MAJOR)
+#if (__STDC_VERSION__ >= 199901L)
+#define EV_SIZE_FMT "%zu"
+#define EV_SSIZE_FMT "%zd"
+#define EV_SIZE_ARG(x) (x)
+#define EV_SSIZE_ARG(x) (x)
+#endif
+#endif
+
+#ifndef EV_SIZE_FMT
+#if (EVENT__SIZEOF_SIZE_T <= EVENT__SIZEOF_LONG)
+#define EV_SIZE_FMT "%lu"
+#define EV_SSIZE_FMT "%ld"
+#define EV_SIZE_ARG(x) ((unsigned long)(x))
+#define EV_SSIZE_ARG(x) ((long)(x))
+#else
+#define EV_SIZE_FMT EV_U64_FMT
+#define EV_SSIZE_FMT EV_I64_FMT
+#define EV_SIZE_ARG(x) EV_U64_ARG(x)
+#define EV_SSIZE_ARG(x) EV_I64_ARG(x)
+#endif
+#endif
+
+evutil_socket_t evutil_socket_(int domain, int type, int protocol);
+evutil_socket_t evutil_accept4_(evutil_socket_t sockfd, struct sockaddr *addr,
+ ev_socklen_t *addrlen, int flags);
+
+/* Used by one of the test programs. */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_internal_pipe_(evutil_socket_t fd[2]);
+evutil_socket_t evutil_eventfd_(unsigned initval, int flags);
+
+#ifdef SOCK_NONBLOCK
+#define EVUTIL_SOCK_NONBLOCK SOCK_NONBLOCK
+#else
+#define EVUTIL_SOCK_NONBLOCK 0x4000000
+#endif
+#ifdef SOCK_CLOEXEC
+#define EVUTIL_SOCK_CLOEXEC SOCK_CLOEXEC
+#else
+#define EVUTIL_SOCK_CLOEXEC 0x80000000
+#endif
+#ifdef EFD_NONBLOCK
+#define EVUTIL_EFD_NONBLOCK EFD_NONBLOCK
+#else
+#define EVUTIL_EFD_NONBLOCK 0x4000
+#endif
+#ifdef EFD_CLOEXEC
+#define EVUTIL_EFD_CLOEXEC EFD_CLOEXEC
+#else
+#define EVUTIL_EFD_CLOEXEC 0x8000
+#endif
+
+void evutil_memclear_(void *mem, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
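EVUTIL_UPCAST() is the classic container_of idiom: given a pointer to an embedded member, recover the enclosing structure. A standalone sketch with made-up struct names (not libevent code):

#include <stddef.h>
#include <stdio.h>

struct base_part { int id; };

struct derived {
	char name[16];
	struct base_part common;	/* embedded "superclass" */
};

/* Same arithmetic as EVUTIL_UPCAST, using the standard offsetof(). */
#define UPCAST(ptr, type, field) \
	((type *)(((char *)(ptr)) - offsetof(type, field)))

static void
print_name(struct base_part *bp)
{
	/* Recover the containing struct from a pointer to its member. */
	struct derived *d = UPCAST(bp, struct derived, common);
	printf("%s (id=%d)\n", d->name, bp->id);
}

int
main(void)
{
	struct derived d = { "example", { 42 } };
	print_name(&d.common);
	return 0;
}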
diff --git a/libs/libevent/src/win32select.c b/libs/libevent/src/win32select.c
new file mode 100644
index 0000000000..1766858c2c
--- /dev/null
+++ b/libs/libevent/src/win32select.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2003 Michael A. Davis <mike@datanerds.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+
+#include <winsock2.h>
+#include <windows.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "event2/event.h"
+#include "event-internal.h"
+#include "evmap-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "time-internal.h"
+
+#define XFREE(ptr) do { if (ptr) mm_free(ptr); } while (0)
+
+extern struct event_list timequeue;
+extern struct event_list addqueue;
+
+struct win_fd_set {
+ unsigned int fd_count;
+ SOCKET fd_array[1];
+};
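+/* Layout-compatible with Winsock's fd_set, but allocated with a run-time
+ * capacity (fd_array grows past one entry) instead of the fixed FD_SETSIZE. */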
+
+/* MSDN says this is required to handle SIGFPE */
+volatile double SIGFPE_REQ = 0.0f;
+
+struct idx_info {
+ int read_pos_plus1;
+ int write_pos_plus1;
+};
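+/* Per-socket bookkeeping: positions are stored plus one, so a zero-initialized
+ * idx_info means "not present in either set". */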
+
+struct win32op {
+ unsigned num_fds_in_fd_sets;
+ int resize_out_sets;
+ struct win_fd_set *readset_in;
+ struct win_fd_set *writeset_in;
+ struct win_fd_set *readset_out;
+ struct win_fd_set *writeset_out;
+ struct win_fd_set *exset_out;
+ unsigned signals_are_broken : 1;
+};
+
+static void *win32_init(struct event_base *);
+static int win32_add(struct event_base *, evutil_socket_t, short old, short events, void *idx_);
+static int win32_del(struct event_base *, evutil_socket_t, short old, short events, void *idx_);
+static int win32_dispatch(struct event_base *base, struct timeval *);
+static void win32_dealloc(struct event_base *);
+
+struct eventop win32ops = {
+ "win32",
+ win32_init,
+ win32_add,
+ win32_del,
+ win32_dispatch,
+ win32_dealloc,
+ 0, /* doesn't need reinit */
+ 0, /* No features supported. */
+ sizeof(struct idx_info),
+};
+
+#define FD_SET_ALLOC_SIZE(n) ((sizeof(struct win_fd_set) + ((n)-1)*sizeof(SOCKET)))
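+/* (n)-1 because struct win_fd_set already embeds one SOCKET. */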
+
+static int
+grow_fd_sets(struct win32op *op, unsigned new_num_fds)
+{
+	size_t size;
+	struct win_fd_set *set;
+
+ EVUTIL_ASSERT(new_num_fds >= op->readset_in->fd_count &&
+ new_num_fds >= op->writeset_in->fd_count);
+ EVUTIL_ASSERT(new_num_fds >= 1);
+
+ size = FD_SET_ALLOC_SIZE(new_num_fds);
+	/* Use a temporary so a failed realloc does not leak the old buffer. */
+	if (!(set = mm_realloc(op->readset_in, size)))
+		return (-1);
+	op->readset_in = set;
+	if (!(set = mm_realloc(op->writeset_in, size)))
+		return (-1);
+	op->writeset_in = set;
+ op->resize_out_sets = 1;
+ op->num_fds_in_fd_sets = new_num_fds;
+ return (0);
+}
+
+static int
+do_fd_set(struct win32op *op, struct idx_info *ent, evutil_socket_t s, int read)
+{
+ struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+ if (read) {
+ if (ent->read_pos_plus1 > 0)
+ return (0);
+ } else {
+ if (ent->write_pos_plus1 > 0)
+ return (0);
+ }
+ if (set->fd_count == op->num_fds_in_fd_sets) {
+ if (grow_fd_sets(op, op->num_fds_in_fd_sets*2))
+ return (-1);
+		/* grow_fd_sets() reallocates the arrays, so re-fetch the set pointer. */
+ set = read ? op->readset_in : op->writeset_in;
+ }
+ set->fd_array[set->fd_count] = s;
+ if (read)
+ ent->read_pos_plus1 = set->fd_count+1;
+ else
+ ent->write_pos_plus1 = set->fd_count+1;
+ return (set->fd_count++);
+}
+
+static int
+do_fd_clear(struct event_base *base,
+ struct win32op *op, struct idx_info *ent, int read)
+{
+ int i;
+ struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+ if (read) {
+ i = ent->read_pos_plus1 - 1;
+ ent->read_pos_plus1 = 0;
+ } else {
+ i = ent->write_pos_plus1 - 1;
+ ent->write_pos_plus1 = 0;
+ }
+ if (i < 0)
+ return (0);
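+	/* Remove the socket by swapping the last array entry into its slot,
+	 * then fix up the moved socket's cached position. */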
+ if (--set->fd_count != (unsigned)i) {
+ struct idx_info *ent2;
+ SOCKET s2;
+ s2 = set->fd_array[i] = set->fd_array[set->fd_count];
+
+ ent2 = evmap_io_get_fdinfo_(&base->io, s2);
+
+ if (!ent2) /* This indicates a bug. */
+ return (0);
+ if (read)
+ ent2->read_pos_plus1 = i+1;
+ else
+ ent2->write_pos_plus1 = i+1;
+ }
+ return (0);
+}
+
+#define NEVENT 32
+void *
+win32_init(struct event_base *base)
+{
+ struct win32op *winop;
+ size_t size;
+ if (!(winop = mm_calloc(1, sizeof(struct win32op))))
+ return NULL;
+ winop->num_fds_in_fd_sets = NEVENT;
+ size = FD_SET_ALLOC_SIZE(NEVENT);
+ if (!(winop->readset_in = mm_malloc(size)))
+ goto err;
+ if (!(winop->writeset_in = mm_malloc(size)))
+ goto err;
+ if (!(winop->readset_out = mm_malloc(size)))
+ goto err;
+ if (!(winop->writeset_out = mm_malloc(size)))
+ goto err;
+ if (!(winop->exset_out = mm_malloc(size)))
+ goto err;
+ winop->readset_in->fd_count = winop->writeset_in->fd_count = 0;
+ winop->readset_out->fd_count = winop->writeset_out->fd_count
+ = winop->exset_out->fd_count = 0;
+
+ if (evsig_init_(base) < 0)
+ winop->signals_are_broken = 1;
+
+ evutil_weakrand_seed_(&base->weakrand_seed, 0);
+
+ return (winop);
+ err:
+ XFREE(winop->readset_in);
+ XFREE(winop->writeset_in);
+ XFREE(winop->readset_out);
+ XFREE(winop->writeset_out);
+ XFREE(winop->exset_out);
+ XFREE(winop);
+ return (NULL);
+}
+
+int
+win32_add(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *idx_)
+{
+ struct win32op *win32op = base->evbase;
+ struct idx_info *idx = idx_;
+
+ if ((events & EV_SIGNAL) && win32op->signals_are_broken)
+ return (-1);
+
+ if (!(events & (EV_READ|EV_WRITE)))
+ return (0);
+
+	event_debug(("%s: adding event for "EV_SOCK_FMT,
+	    __func__, EV_SOCK_ARG(fd)));
+ if (events & EV_READ) {
+ if (do_fd_set(win32op, idx, fd, 1)<0)
+ return (-1);
+ }
+ if (events & EV_WRITE) {
+ if (do_fd_set(win32op, idx, fd, 0)<0)
+ return (-1);
+ }
+ return (0);
+}
+
+int
+win32_del(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *idx_)
+{
+ struct win32op *win32op = base->evbase;
+ struct idx_info *idx = idx_;
+
+ event_debug(("%s: Removing event for "EV_SOCK_FMT,
+ __func__, EV_SOCK_ARG(fd)));
+ if (events & EV_READ)
+ do_fd_clear(base, win32op, idx, 1);
+ if (events & EV_WRITE)
+ do_fd_clear(base, win32op, idx, 0);
+
+ return 0;
+}
+
+static void
+fd_set_copy(struct win_fd_set *out, const struct win_fd_set *in)
+{
+ out->fd_count = in->fd_count;
+ memcpy(out->fd_array, in->fd_array, in->fd_count * (sizeof(SOCKET)));
+}
+
+/*
+ static void dump_fd_set(struct win_fd_set *s)
+ {
+ unsigned int i;
+ printf("[ ");
+ for(i=0;i<s->fd_count;++i)
+ printf("%d ",(int)s->fd_array[i]);
+ printf("]\n");
+ }
+*/
+
+int
+win32_dispatch(struct event_base *base, struct timeval *tv)
+{
+ struct win32op *win32op = base->evbase;
+ int res = 0;
+ unsigned j, i;
+ int fd_count;
+ SOCKET s;
+
+ if (win32op->resize_out_sets) {
+		size_t size = FD_SET_ALLOC_SIZE(win32op->num_fds_in_fd_sets);
+		struct win_fd_set *tmp;
+		/* As in grow_fd_sets(), realloc through a temporary so a
+		 * failed reallocation does not leak the old buffer. */
+		if (!(tmp = mm_realloc(win32op->readset_out, size)))
+			return (-1);
+		win32op->readset_out = tmp;
+		if (!(tmp = mm_realloc(win32op->exset_out, size)))
+			return (-1);
+		win32op->exset_out = tmp;
+		if (!(tmp = mm_realloc(win32op->writeset_out, size)))
+			return (-1);
+		win32op->writeset_out = tmp;
+ win32op->resize_out_sets = 0;
+ }
+
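+	/* Refresh the out-sets from the in-sets.  The except set mirrors the
+	 * write set because Winsock reports a failed non-blocking connect()
+	 * on exceptfds rather than writefds. */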
+ fd_set_copy(win32op->readset_out, win32op->readset_in);
+ fd_set_copy(win32op->exset_out, win32op->writeset_in);
+ fd_set_copy(win32op->writeset_out, win32op->writeset_in);
+
+ fd_count =
+ (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
+ win32op->readset_out->fd_count : win32op->writeset_out->fd_count;
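+	/* Winsock ignores select()'s nfds argument; fd_count is computed only
+	 * to detect the "no sockets at all" case below. */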
+
+ if (!fd_count) {
+ long msec = tv ? evutil_tv_to_msec_(tv) : LONG_MAX;
+ /* Sleep's DWORD argument is unsigned long */
+ if (msec < 0)
+ msec = LONG_MAX;
+ /* Windows doesn't like you to call select() with no sockets */
+ Sleep(msec);
+ return (0);
+ }
+
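+	/* Drop the base lock while blocking in select() so other threads can
+	 * add or delete events; re-acquire it before touching the base. */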
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = select(fd_count,
+ (struct fd_set*)win32op->readset_out,
+ (struct fd_set*)win32op->writeset_out,
+ (struct fd_set*)win32op->exset_out, tv);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ event_debug(("%s: select returned %d", __func__, res));
+
+ if (res <= 0) {
+ return res;
+ }
+
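+	/* Each scan below starts at a random offset so that sockets sitting
+	 * early in fd_array are not consistently serviced first. */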
+ if (win32op->readset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->readset_out->fd_count);
+ for (j=0; j<win32op->readset_out->fd_count; ++j) {
+ if (++i >= win32op->readset_out->fd_count)
+ i = 0;
+ s = win32op->readset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_READ);
+ }
+ }
+ if (win32op->exset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->exset_out->fd_count);
+ for (j=0; j<win32op->exset_out->fd_count; ++j) {
+ if (++i >= win32op->exset_out->fd_count)
+ i = 0;
+ s = win32op->exset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_WRITE);
+ }
+ }
+ if (win32op->writeset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->writeset_out->fd_count);
+ for (j=0; j<win32op->writeset_out->fd_count; ++j) {
+ if (++i >= win32op->writeset_out->fd_count)
+ i = 0;
+ s = win32op->writeset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_WRITE);
+ }
+ }
+ return (0);
+}
+
+void
+win32_dealloc(struct event_base *base)
+{
+ struct win32op *win32op = base->evbase;
+
+ evsig_dealloc_(base);
+ if (win32op->readset_in)
+ mm_free(win32op->readset_in);
+ if (win32op->writeset_in)
+ mm_free(win32op->writeset_in);
+ if (win32op->readset_out)
+ mm_free(win32op->readset_out);
+ if (win32op->writeset_out)
+ mm_free(win32op->writeset_out);
+ if (win32op->exset_out)
+ mm_free(win32op->exset_out);
+ /* XXXXX free the tree. */
+
+ memset(win32op, 0, sizeof(*win32op));
+ mm_free(win32op);
+}
+
+#endif