path: root/libs/libcurl
author     dartraiden <wowemuh@gmail.com>  2019-11-08 10:55:37 +0300
committer  dartraiden <wowemuh@gmail.com>  2019-11-08 10:55:37 +0300
commit     72d615e38510b5a0da501db32c8260daacee0104 (patch)
tree       2034f84c9d9a4ca5a2396d923bf151985aff1dbb /libs/libcurl
parent     4da5039cd9bed18e59e298f8972f967e99a9149d (diff)
libcurl: update to 7.67
Diffstat (limited to 'libs/libcurl')
-rw-r--r--  libs/libcurl/docs/CHANGES | 3524
-rw-r--r--  libs/libcurl/docs/THANKS | 42
-rw-r--r--  libs/libcurl/include/curl/curl.h | 2
-rw-r--r--  libs/libcurl/include/curl/curlver.h | 8
-rw-r--r--  libs/libcurl/include/curl/multi.h | 6
-rw-r--r--  libs/libcurl/include/curl/urlapi.h | 2
-rw-r--r--  libs/libcurl/src/Makefile.in | 31
-rw-r--r--  libs/libcurl/src/Makefile.inc | 4
-rw-r--r--  libs/libcurl/src/Makefile.netware | 2
-rw-r--r--  libs/libcurl/src/altsvc.c | 20
-rw-r--r--  libs/libcurl/src/asyn-thread.c | 28
-rw-r--r--  libs/libcurl/src/checksrc.pl | 2
-rw-r--r--  libs/libcurl/src/config-amigaos.h | 4
-rw-r--r--  libs/libcurl/src/config-os400.h | 6
-rw-r--r--  libs/libcurl/src/config-plan9.h | 1
-rw-r--r--  libs/libcurl/src/config-riscos.h | 8
-rw-r--r--  libs/libcurl/src/config-symbian.h | 3
-rw-r--r--  libs/libcurl/src/config-tpf.h | 6
-rw-r--r--  libs/libcurl/src/config-vxworks.h | 3
-rw-r--r--  libs/libcurl/src/config-win32.h | 4
-rw-r--r--  libs/libcurl/src/conncache.c | 8
-rw-r--r--  libs/libcurl/src/connect.c | 15
-rw-r--r--  libs/libcurl/src/cookie.c | 19
-rw-r--r--  libs/libcurl/src/cookie.h | 2
-rw-r--r--  libs/libcurl/src/curl_config.h.cmake | 3
-rw-r--r--  libs/libcurl/src/curl_config.h.in | 6
-rw-r--r--  libs/libcurl/src/doh.c | 58
-rw-r--r--  libs/libcurl/src/easy.c | 7
-rw-r--r--  libs/libcurl/src/ftp.c | 396
-rw-r--r--  libs/libcurl/src/ftp.h | 6
-rw-r--r--  libs/libcurl/src/ftplistparser.c | 2
-rw-r--r--  libs/libcurl/src/hostcheck.c | 2
-rw-r--r--  libs/libcurl/src/hostip.c | 2
-rw-r--r--  libs/libcurl/src/http.c | 17
-rw-r--r--  libs/libcurl/src/http.h | 5
-rw-r--r--  libs/libcurl/src/http2.c | 49
-rw-r--r--  libs/libcurl/src/http_chunks.c | 28
-rw-r--r--  libs/libcurl/src/http_chunks.h | 13
-rw-r--r--  libs/libcurl/src/http_proxy.c | 9
-rw-r--r--  libs/libcurl/src/imap.c | 5
-rw-r--r--  libs/libcurl/src/ldap.c | 24
-rw-r--r--  libs/libcurl/src/libcurl.plist | 6
-rw-r--r--  libs/libcurl/src/mime.c | 19
-rw-r--r--  libs/libcurl/src/mime.h | 6
-rw-r--r--  libs/libcurl/src/multi.c | 18
-rw-r--r--  libs/libcurl/src/multihandle.h | 1
-rw-r--r--  libs/libcurl/src/multiif.h | 6
-rw-r--r--  libs/libcurl/src/netrc.c | 2
-rw-r--r--  libs/libcurl/src/non-ascii.c | 2
-rw-r--r--  libs/libcurl/src/parsedate.c | 14
-rw-r--r--  libs/libcurl/src/security.c | 2
-rw-r--r--  libs/libcurl/src/setopt.c | 21
-rw-r--r--  libs/libcurl/src/setup-os400.h | 6
-rw-r--r--  libs/libcurl/src/smb.c | 3
-rw-r--r--  libs/libcurl/src/socketpair.c | 118
-rw-r--r--  libs/libcurl/src/socketpair.h | 36
-rw-r--r--  libs/libcurl/src/socks.c | 64
-rw-r--r--  libs/libcurl/src/strcase.c | 86
-rw-r--r--  libs/libcurl/src/strcase.h | 4
-rw-r--r--  libs/libcurl/src/transfer.c | 14
-rw-r--r--  libs/libcurl/src/url.c | 61
-rw-r--r--  libs/libcurl/src/url.h | 2
-rw-r--r--  libs/libcurl/src/urlapi.c | 156
-rw-r--r--  libs/libcurl/src/urldata.h | 393
-rw-r--r--  libs/libcurl/src/vauth/vauth.h | 2
-rw-r--r--  libs/libcurl/src/version.c | 15
-rw-r--r--  libs/libcurl/src/vquic/ngtcp2.c | 1593
-rw-r--r--  libs/libcurl/src/vquic/ngtcp2.h | 63
-rw-r--r--  libs/libcurl/src/vquic/quiche.c | 783
-rw-r--r--  libs/libcurl/src/vquic/quiche.h | 49
-rw-r--r--  libs/libcurl/src/vssh/libssh.c | 6
-rw-r--r--  libs/libcurl/src/vssh/libssh2.c | 4
-rw-r--r--  libs/libcurl/src/vtls/gskit.c | 102
-rw-r--r--  libs/libcurl/src/vtls/gtls.c | 6
-rw-r--r--  libs/libcurl/src/vtls/mbedtls.c | 7
-rw-r--r--  libs/libcurl/src/vtls/mesalink.c | 7
-rw-r--r--  libs/libcurl/src/vtls/nss.c | 2
-rw-r--r--  libs/libcurl/src/vtls/openssl.c | 32
-rw-r--r--  libs/libcurl/src/vtls/polarssl.c | 4
-rw-r--r--  libs/libcurl/src/vtls/schannel.c | 12
-rw-r--r--  libs/libcurl/src/vtls/schannel_verify.c | 2
-rw-r--r--  libs/libcurl/src/vtls/sectransp.c | 6
-rw-r--r--  libs/libcurl/src/vtls/vtls.c | 5
83 files changed, 5211 insertions, 2911 deletions
diff --git a/libs/libcurl/docs/CHANGES b/libs/libcurl/docs/CHANGES
index 0047ab41ac..d35f541998 100644
--- a/libs/libcurl/docs/CHANGES
+++ b/libs/libcurl/docs/CHANGES
@@ -6,6 +6,1505 @@
Changelog
+Version 7.67.0 (5 Nov 2019)
+
+Daniel Stenberg (5 Nov 2019)
+- RELEASE-NOTES: synced
+
+ The 7.67.0 release
+
+- THANKS: add new names from 7.67.0
+
+- configure: only say ipv6 enabled when the variable is set
+
+ Previously it could say "IPv6: enabled" at the end of the configure run
+ but the define wasn't set because of a missing getaddrinfo().
+
+ Reported-by: Marcel Raad
+ Fixes #4555
+ Closes #4560
+
+Marcel Raad (2 Nov 2019)
+- certs/Server-localhost-lastSAN-sv: regenerate with sha256
+
+ All other certificates were regenerated in commit ba782baac30, but
+ this one was missed.
+ Fixes test3001 on modern systems.
+
+ Closes https://github.com/curl/curl/pull/4551
+
+Daniel Stenberg (2 Nov 2019)
+- [Vilhelm Prytz brought this change]
+
+ copyrights: update all copyright notices to 2019 on files changed this year
+
+ Closes #4547
+
+- [Bastien Bouclet brought this change]
+
+ mbedtls: add error message for cert validity starting in the future
+
+ Closes #4552
+
+Jay Satiro (1 Nov 2019)
+- schannel_verify: Fix concurrent openings of CA file
+
+ - Open the CA file using FILE_SHARE_READ mode so that others can read
+ from it as well.
+
+    Prior to this change our schannel code opened the CA file without
+    sharing, which meant concurrent openings (e.g. an attempt from another
+    thread or process) would fail while the file was open, which in curl's
+    case would cause the error:
+    "schannel: failed to open CA file".
+
+ Bug: https://curl.haxx.se/mail/lib-2019-10/0104.html
+ Reported-by: Richard Alcock
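+
+    A minimal standalone sketch of the sharing difference (not curl's
+    actual schannel code; the file name is illustrative): passing
+    FILE_SHARE_READ to CreateFile lets another thread or process open the
+    same file for reading, while a share mode of 0 locks them out.
+
+      #include <windows.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        /* FILE_SHARE_READ: other readers may open the file while we
+           hold it open; a share mode of 0 would make them fail */
+        HANDLE h = CreateFileA("ca-bundle.crt", GENERIC_READ,
+                               FILE_SHARE_READ, NULL, OPEN_EXISTING,
+                               FILE_ATTRIBUTE_NORMAL, NULL);
+        if(h == INVALID_HANDLE_VALUE) {
+          fprintf(stderr, "open failed: %lu\n", GetLastError());
+          return 1;
+        }
+        /* ... read the CA bundle here ... */
+        CloseHandle(h);
+        return 0;
+      }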
+
+Daniel Stenberg (31 Oct 2019)
+- gtls: make gnutls_bye() not wait for response on shutdown
+
+ ... as it can make it wait there for a long time for no good purpose.
+
+ Patched-by: Jay Satiro
+ Reported-by: Bylon2 on github
+  Advised-by: Nikos Mavrogiannopoulos
+
+ Fixes #4487
+ Closes #4541
+
+- [Michał Janiszewski brought this change]
+
+ appveyor: publish artifacts on appveyor
+
+ This allows obtaining upstream builds of curl directly from appveyor for
+ all the available configurations
+
+ Closes #4509
+
+- url: make Curl_close() NULLify the pointer too
+
+  This is the common pattern used in the code, and with a unified
+  approach we avoid mistakes.
+
+ Closes #4534
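+
+  A hedged sketch of the pattern in general terms, with hypothetical names
+  rather than the real Curl_close() signature: the cleanup function takes a
+  pointer to the caller's pointer, frees the object and zeroes that pointer
+  so it cannot dangle afterwards.
+
+      #include <stdlib.h>
+
+      struct widget { int dummy; };     /* hypothetical object type */
+
+      /* free *wp and NULL the caller's pointer in one step */
+      static void widget_close(struct widget **wp)
+      {
+        if(!wp || !*wp)
+          return;
+        free(*wp);
+        *wp = NULL;            /* caller's pointer no longer dangles */
+      }
+
+      int main(void)
+      {
+        struct widget *w = calloc(1, sizeof(*w));
+        widget_close(&w);
+        widget_close(&w);      /* safe: sees NULL and does nothing */
+        return 0;
+      }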
+
+- [Trivikram Kamat brought this change]
+
+ INSTALL: add missing space for configure commands
+
+ Closes #4539
+
+- url: Curl_free_request_state() should also free doh handles
+
+ ... or risk DoH memory leaks.
+
+ Reported-by: Paul Dreik
+ Fixes #4463
+ Closes #4527
+
+- examples: remove the "this exact code has not been verified"
+
+  ... as it really confuses the reader not to know what to believe!
+
+- [Trivikram Kamat brought this change]
+
+ HTTP3: fix typo somehere1 > somewhere1
+
+ Closes #4535
+
+Jay Satiro (28 Oct 2019)
+- [Javier Blazquez brought this change]
+
+ HTTP3: fix invalid use of sendto for connected UDP socket
+
+  On macOS/BSD, trying to call sendto on a connected UDP socket fails
+  with an EISCONN error. Because singleipconnect has already called
+  connect on the socket by the time we're trying to use it for QUIC
+  transfers, we need to use plain send instead.
+
+ Fixes #4529
+ Closes https://github.com/curl/curl/pull/4533
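+
+  A plain-sockets illustration of the point, hedged (this is not the QUIC
+  code itself; address and port are made up): once connect() has fixed the
+  peer of a UDP socket, send() is the call to use, since sendto() with an
+  explicit address can fail with EISCONN on macOS/BSD.
+
+      #include <sys/socket.h>
+      #include <netinet/in.h>
+      #include <arpa/inet.h>
+      #include <string.h>
+      #include <unistd.h>
+
+      int main(void)
+      {
+        struct sockaddr_in peer;
+        int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+        memset(&peer, 0, sizeof(peer));
+        peer.sin_family = AF_INET;
+        peer.sin_port = htons(4433);
+        inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);
+
+        /* fix the peer address once... */
+        connect(fd, (struct sockaddr *)&peer, sizeof(peer));
+
+        /* ...then use send(); sendto() with an address may return
+           EISCONN on macOS/BSD for an already-connected UDP socket */
+        send(fd, "ping", 4, 0);
+
+        close(fd);
+        return 0;
+      }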
+
+Daniel Stenberg (28 Oct 2019)
+- RELEASE-NOTES: synced
+
+- [Javier Blazquez brought this change]
+
+ HTTP3: fix Windows build
+
+ The ngtcp2 QUIC backend was using the MSG_DONTWAIT flag for send/recv
+ in order to perform nonblocking operations. On Windows this flag does
+ not exist. Instead, the socket must be set to nonblocking mode via
+ ioctlsocket.
+
+ This change sets the nonblocking flag on UDP sockets used for QUIC on
+ all platforms so the use of MSG_DONTWAIT is not needed.
+
+ Fixes #4531
+ Closes #4532
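+
+  A hedged sketch of that approach in isolation (the helper name is made
+  up, and the real code uses the proper platform socket types): set the
+  socket non-blocking once, with ioctlsocket(FIONBIO) on Windows and
+  fcntl(O_NONBLOCK) elsewhere, instead of passing MSG_DONTWAIT per call.
+
+      #ifdef _WIN32
+      #include <winsock2.h>
+      #else
+      #include <fcntl.h>
+      #endif
+
+      /* returns 0 on success, -1 on failure */
+      int set_nonblocking(long sock)
+      {
+      #ifdef _WIN32
+        u_long on = 1;
+        return ioctlsocket((SOCKET)sock, FIONBIO, &on) ? -1 : 0;
+      #else
+        int flags = fcntl((int)sock, F_GETFL, 0);
+        if(flags == -1)
+          return -1;
+        return fcntl((int)sock, F_SETFL, flags | O_NONBLOCK) == -1 ? -1 : 0;
+      #endif
+      }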
+
+Marcel Raad (27 Oct 2019)
+- appveyor: add --disable-proxy autotools build
+
+ This would have caught issue #3926.
+
+ Also make formatting more consistent.
+
+ Closes https://github.com/curl/curl/pull/4526
+
+Daniel Stenberg (25 Oct 2019)
+- appveyor: make winbuilds with DEBUG=no/yes and VS 2015/2017
+
+ ... and invoke "curl -V" once done
+
+ Co-Authored-By: Jay Satiro
+
+ Closes #4523
+
+- [Francois Rivard brought this change]
+
+ schannel: reverse the order of certinfo insertions
+
+ Fixes #4518
+ Closes #4519
+
+Marcel Raad (24 Oct 2019)
+- test1591: fix spelling of http feature
+
+ The test never got run because the feature name is `http` in lowercase.
+
+ Closes https://github.com/curl/curl/pull/4520
+
+Daniel Stenberg (23 Oct 2019)
+- [Michał Janiszewski brought this change]
+
+  appveyor: Use two parallel compilations on appveyor with CMake
+
+  Appveyor provides 2 CPUs for each builder[1], so make sure to use parallel
+  compilation when running with CMake. CMake learned this new option in
+ version 3.12[2] and the version provided by appveyor is fresh enough.
+
+ Curl doesn't really take that long to build and it is using the slowest
+ builder available, msbuild, so expect only a moderate improvement in
+ build times.
+
+ [1] https://www.appveyor.com/docs/build-environment/
+ [2] https://cmake.org/cmake/help/v3.12/release/3.12.html
+
+ Closes #4508
+
+- conn-reuse: requests wanting NTLM can reuse non-NTLM connections
+
+ Added test case 338 to verify.
+
+ Reported-by: Daniel Silverstone
+ Fixes #4499
+ Closes #4514
+
+Marcel Raad (23 Oct 2019)
+- tests: add missing proxy features
+
+Daniel Stenberg (22 Oct 2019)
+- RELEASE-NOTES: synced
+
+Marcel Raad (21 Oct 2019)
+- tests: use %FILE_PWD for file:// URLs
+
+ This way, we always have exactly one slash after the host name, making
+ the tests pass when curl is compiled with the MSYS GCC.
+
+ Closes https://github.com/curl/curl/pull/4512
+
+- tests: add `connect to non-listen` keywords
+
+ These tests try to connect to ports nothing is listening on.
+
+ Closes https://github.com/curl/curl/pull/4511
+
+- runtests: get textaware info from curl instead of perl
+
+ The MSYS system on Windows can run the test suite for curl built with
+ any toolset. When built with the MSYS GCC, curl uses Unix line endings,
+ while it uses Windows line endings when built with the MinGW GCC, and
+ `^O` reports 'msys' in both cases. Use the curl executable itself to
+ determine the line endings instead, which reports 'x86_64-pc-msys' when
+ built with the MSYS GCC.
+
+ Closes https://github.com/curl/curl/pull/4506
+
+Daniel Stenberg (20 Oct 2019)
+- [Michał Janiszewski brought this change]
+
+ appveyor: Add MSVC ARM64 build
+
+ Closes #4507
+
+- http2_recv: a closed stream trumps pause state
+
+ ... and thus should return 0, not EAGAIN.
+
+ Reported-by: Tom van der Woerdt
+ Fixes #4496
+ Closes #4505
+
+- http2: expire a timeout at end of stream
+
+  To make sure that the transfer is being dealt with. Streams without
+ Content-Length need a final read to notice the end-of-stream state.
+
+ Reported-by: Tom van der Woerdt
+ Fixes #4496
+
+Dan Fandrich (18 Oct 2019)
+- travis: Add an ARM64 build
+
+ Test 323 is failing for some reason, so disable it there for now.
+
+Marcel Raad (18 Oct 2019)
+- examples/sslbackend: fix -Wchar-subscripts warning
+
+ With the `isdigit` implementation that comes with MSYS2, the argument
+ is used as an array subscript, resulting in a -Wchar-subscripts
+ warning. `isdigit`'s behavior is undefined if the argument is negative
+ and not EOF [0]. As done in lib/curl_ctype.h, cast the `char` variable
+ to `unsigned char` to avoid that.
+
+ [0] https://en.cppreference.com/w/c/string/byte/isdigit
+
+ Closes https://github.com/curl/curl/pull/4503
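+
+  The point in isolation, as a hedged standalone example: the ctype
+  functions take an int that must be the value of an unsigned char (or
+  EOF), so a possibly-negative plain char needs a cast first.
+
+      #include <ctype.h>
+      #include <stdio.h>
+
+      int main(void)
+      {
+        char c = (char)0xb5;  /* negative where plain char is signed */
+
+        /* isdigit(c) here would be undefined behaviour for negative c;
+           converting to unsigned char first is well-defined: */
+        if(isdigit((unsigned char)c))
+          printf("digit\n");
+        else
+          printf("not a digit\n");
+        return 0;
+      }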
+
+Daniel Stenberg (18 Oct 2019)
+- configure: remove all cyassl references
+
+ In particular, this removes the case where configure would find an old
+  cyassl installation rather than a wolfssl one if present. The library is
+ named wolfssl in modern days so there's no real need to keep support for
+ the former.
+
+ Reported-by: Jacob Barthelmeh
+ Closes #4502
+
+Marcel Raad (17 Oct 2019)
+- test1162: disable MSYS2's POSIX path conversion
+
+  This avoids MSYS2 converting the backslash in the URL to a slash,
+  which caused the test to fail.
+
+Daniel Stenberg (17 Oct 2019)
+- RELEASE-NOTES: synced
+
+Jay Satiro (16 Oct 2019)
+- CURLOPT_TIMEOUT.3: Clarify transfer timeout time includes queue time
+
+ Prior to this change some users did not understand that the "request"
+ starts when the handle is added to the multi handle, or probably they
+ did not understand that some of those transfers may be queued and that
+  the queue time is included in the timeout.
+
+ Reported-by: Jeroen Ooms
+
+ Fixes https://github.com/curl/curl/issues/4486
+ Closes https://github.com/curl/curl/pull/4489
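+
+  A hedged libcurl sketch of the clarified behaviour (URL and limits are
+  only illustrative): with many easy handles added to one multi handle but
+  few connections allowed, CURLOPT_TIMEOUT_MS starts counting for each
+  transfer when curl_multi_add_handle() is called, so time spent queued
+  counts against the budget.
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURLM *multi;
+        int i, running;
+
+        curl_global_init(CURL_GLOBAL_DEFAULT);
+        multi = curl_multi_init();
+
+        /* only two transfers run at a time; the rest wait in a queue */
+        curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 2L);
+
+        for(i = 0; i < 20; i++) {
+          CURL *easy = curl_easy_init();
+          curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
+          /* the 5 second budget includes time spent in the queue */
+          curl_easy_setopt(easy, CURLOPT_TIMEOUT_MS, 5000L);
+          curl_multi_add_handle(multi, easy);
+        }
+
+        do {
+          curl_multi_perform(multi, &running);
+          curl_multi_wait(multi, NULL, 0, 1000, NULL);
+        } while(running);
+
+        /* easy handle cleanup omitted for brevity */
+        curl_multi_cleanup(multi);
+        curl_global_cleanup();
+        return 0;
+      }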
+
+- [Stian Soiland-Reyes brought this change]
+
+ tool_operate: Fix retry sleep time shown to user when Retry-After
+
+ - If server header Retry-After is being used for retry sleep time then
+ show that value to the user instead of the normal retry sleep time.
+
+  This is a follow-up to 640b973 (7.66.0) which changed the curl tool so
+  that the value from the Retry-After header overrides other retry timing
+  options.
+
+ Closes https://github.com/curl/curl/pull/4498
+
+Daniel Stenberg (16 Oct 2019)
+- url: normalize CURLINFO_EFFECTIVE_URL
+
+  The URL extracted with CURLINFO_EFFECTIVE_URL was in most cases returned
+  exactly as given as input, which meant it did not get a scheme prefixed
+  like before if the URL was given without one, and dotdot sequences etc
+  were not removed.
+
+ Added test case 1907 to verify that this now works as intended and as
+ before 7.62.0.
+
+ Regression introduced in 7.62.0
+
+ Reported-by: Christophe Dervieux
+ Fixes #4491
+ Closes #4493
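+
+  A hedged sketch of what the restored behaviour means for an application
+  (the input URL is illustrative): after the transfer,
+  CURLINFO_EFFECTIVE_URL hands back the normalized URL with a scheme
+  prefixed and dotdot sequences removed, rather than the raw input string.
+
+      #include <stdio.h>
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURL *curl;
+        curl_global_init(CURL_GLOBAL_DEFAULT);
+        curl = curl_easy_init();
+
+        /* no scheme given, and a dotdot sequence in the path */
+        curl_easy_setopt(curl, CURLOPT_URL, "example.com/a/../index.html");
+
+        if(curl_easy_perform(curl) == CURLE_OK) {
+          char *eff = NULL;
+          curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &eff);
+          /* expect something like "http://example.com/index.html" */
+          printf("effective URL: %s\n", eff);
+        }
+
+        curl_easy_cleanup(curl);
+        curl_global_cleanup();
+        return 0;
+      }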
+
+Marcel Raad (16 Oct 2019)
+- tests: line ending fixes for Windows
+
+ Mark some files as text.
+
+ Closes https://github.com/curl/curl/pull/4490
+
+- tests: use proxy feature
+
+ This makes the tests succeed when using --disable-proxy.
+
+ Closes https://github.com/curl/curl/pull/4488
+
+- smbserver: fix Python 3 compatibility
+
+ Python 2's `ConfigParser` module is spelled `configparser` in Python 3.
+
+ Closes https://github.com/curl/curl/pull/4484
+
+- security: silence conversion warning
+
+  With MinGW-w64, `curl_socket_t` is a 32 or 64 bit unsigned integer,
+ while `read` expects a 32 bit signed integer.
+ Use `sread` instead of `read` to use the correct parameter type.
+
+ Closes https://github.com/curl/curl/pull/4483
+
+- connect: silence sign-compare warning
+
+ With MinGW-w64 using WinSock, `curl_socklen_t` is signed, while the
+ result of `sizeof` is unsigned.
+
+ Closes https://github.com/curl/curl/pull/4483
+
+Daniel Stenberg (13 Oct 2019)
+- TODO: Handle growing SFTP files
+
+ Closes #4344
+
+- KNOWN_BUGS: remove "CURLFORM_CONTENTLEN in an array"
+
+ The curl_formadd() function is deprecated and shouldn't be used so the
+ real fix for applications is to switch to the curl_mime_* API.
+
+- KNOWN_BUGS: "LDAP on Windows does authentication wrong"
+
+ Closes #3116
+
+- appveyor: add a winbuild that uses VS2017
+
+ Closes #4482
+
+- [Harry Sintonen brought this change]
+
+ socketpair: fix include and define for older TCP header systems
+
+  Fixed the build for systems that need netinet/in.h for IPPROTO_TCP and
+  are missing INADDR_LOOPBACK.
+
+ Closes #4480
+
+- socketpair: fix double-close in error case
+
+ Follow-up to bc2dbef0afc08
+
+- gskit: use the generic Curl_socketpair
+
+- asyn-thread: make use of Curl_socketpair() where available
+
+- socketpair: an implementation for Windows and more
+
+ Curl_socketpair() is designed to be used and work everywhere if there's
+ no native version or the native version isn't good enough.
+
+ Closes #4466
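+
+  A much reduced, hedged sketch of the loopback-TCP trick such an
+  emulation builds on (IPv4/POSIX only, minimal error handling, no
+  verification that the accepted peer is really ours; the real
+  Curl_socketpair() is more careful and also handles Windows socket
+  types):
+
+      #include <sys/socket.h>
+      #include <netinet/in.h>
+      #include <arpa/inet.h>
+      #include <string.h>
+      #include <unistd.h>
+
+      /* create two connected TCP sockets over 127.0.0.1; 0 on success */
+      int tcp_socketpair(int socks[2])
+      {
+        struct sockaddr_in a;
+        socklen_t len = sizeof(a);
+        int listener = socket(AF_INET, SOCK_STREAM, 0);
+
+        socks[0] = socks[1] = -1;
+        if(listener < 0)
+          return -1;
+        memset(&a, 0, sizeof(a));
+        a.sin_family = AF_INET;
+        a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+        a.sin_port = 0;                /* let the kernel pick a port */
+        if(bind(listener, (struct sockaddr *)&a, sizeof(a)) ||
+           getsockname(listener, (struct sockaddr *)&a, &len) ||
+           listen(listener, 1))
+          goto fail;
+        socks[0] = socket(AF_INET, SOCK_STREAM, 0);
+        if(socks[0] < 0 ||
+           connect(socks[0], (struct sockaddr *)&a, len))
+          goto fail;
+        socks[1] = accept(listener, NULL, NULL);
+        if(socks[1] < 0)
+          goto fail;
+        close(listener);               /* the pair stays connected */
+        return 0;
+      fail:
+        close(listener);
+        if(socks[0] >= 0)
+          close(socks[0]);
+        return -1;
+      }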
+
+- RELEASE-NOTES: synced
+
+- connect: return CURLE_OPERATION_TIMEDOUT for errno == ETIMEDOUT
+
+  Previously all connect() failures would return CURLE_COULDNT_CONNECT, no
+ matter what errno said.
+
+ This makes for example --retry work on these transfer failures.
+
+ Reported-by: Nathaniel J. Smith
+ Fixes #4461
+  Closes #4462
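+
+  In rough terms the change amounts to distinguishing a timed-out
+  connect() from other failures when mapping errno to a CURLcode; a hedged
+  sketch with a hypothetical helper name, not curl's internal function:
+
+      #include <errno.h>
+      #include <curl/curl.h>
+
+      /* map a failed connect()'s errno to a CURLcode */
+      CURLcode connect_errno_to_curlcode(int err)
+      {
+        if(err == ETIMEDOUT)
+          return CURLE_OPERATION_TIMEDOUT; /* --retry treats this as
+                                              transient */
+        return CURLE_COULDNT_CONNECT;      /* the old catch-all result */
+      }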
+
+- cirrus: switch off blackhole status on the freebsd CI machines
+
+- tests: use port 2 instead of 60000 for a safer non-listening port
+
+ ... when the tests want "connection refused".
+
+- KNOWN_BUGS: IDN tests failing on Windows
+
+ Closes #3747
+
+Dan Fandrich (9 Oct 2019)
+- cirrus: Increase the git clone depth.
+
+ If more commits are submitted to master between the time of triggering
+ the first Cirrus build and the time the final build gets started, the
+ desired commit is no longer at HEAD and the build will error out.
+ [skip ci]
+
+Daniel Stenberg (9 Oct 2019)
+- docs: make sure the --no-progress-meter docs file is in dist too
+
+- docs: document it as --no-progress-meter instead of the reverse
+
+ Follow-up to 93373a960c3bb4
+
+ Reported-by: infinnovation-dev on github
+ Fixes #4474
+ Closes #4475
+
+Dan Fandrich (9 Oct 2019)
+- cirrus: Switch the FreeBSD 11.x build to 11.3 and add a 13.0 build.
+
+ Also, select the images using image_family to get the latest snapshots
+ automatically.
+ [skip ci]
+
+Daniel Stenberg (8 Oct 2019)
+- curl: --no-progress-meter
+
+ New option that allows a user to ONLY switch off curl's progress meter
+ and leave everything else in "talkative" mode.
+
+ Reported-by: Piotr Komborski
+ Fixes #4422
+ Closes #4470
+
+- TODO: Consult %APPDATA% also for .netrc
+
+ Closes #4016
+
+- CURLOPT_TIMEOUT.3: remove the mention of "minutes"
+
+  ... just say that limiting operations risks aborting otherwise
+  fine-working transfers. Whether that means seconds, minutes or hours,
+  we leave that to the user.
+
+ Reported-by: Martin Gartner
+ Closes #4469
+
+- [Andrei Valeriu BICA brought this change]
+
+ docs: added multi-event.c example
+
+ Similar to multi-uv.c but using libevent 2. This is a simpler libevent
+  integration example than hiperfifo.c.
+
+ Closes #4471
+
+Jay Satiro (5 Oct 2019)
+- [Nicolas brought this change]
+
+ ldap: fix OOM error on missing query string
+
+ - Allow missing queries, don't return NO_MEMORY error in such a case.
+
+ It is acceptable for there to be no specified query string, for example:
+
+ curl ldap://ldap.forumsys.com
+
+ A regression bug in 1b443a7 caused this issue.
+
+ This is a partial fix for #4261.
+
+ Bug: https://github.com/curl/curl/issues/4261#issuecomment-525543077
+ Reported-by: Jojojov@users.noreply.github.com
+ Analyzed-by: Samuel Surtees
+
+ Closes https://github.com/curl/curl/pull/4467
+
+- [Paul B. Omta brought this change]
+
+ build: Remove unused HAVE_LIBSSL and HAVE_LIBCRYPTO defines
+
+ Closes https://github.com/curl/curl/pull/4460
+
+Daniel Stenberg (5 Oct 2019)
+- RELEASE-NOTES: synced
+
+- [Stian Soiland-Reyes brought this change]
+
+ curl: ensure HTTP 429 triggers --retry
+
+ This completes #3794.
+
+ Also make sure the new tests from #4195 are enabled
+
+ Closes #4465
+
+Marcel Raad (4 Oct 2019)
+- [apique brought this change]
+
+ winbuild: add ENABLE_UNICODE option
+
+ Fixes https://github.com/curl/curl/issues/4308
+ Closes https://github.com/curl/curl/pull/4309
+
+Daniel Stenberg (4 Oct 2019)
+- ngtcp2: adapt to API change
+
+ Closes #4457
+
+- cookies: change argument type for Curl_flush_cookies
+
+ The second argument is really a 'bool' so use that and pass in TRUE/FALSE
+ to make it clear.
+
+ Closes #4455
+
+- http2: move state-init from creation to pre-transfer
+
+ To make sure that the HTTP/2 state is initialized correctly for
+ duplicated handles. It would otherwise easily generate "spurious"
+ PRIORITY frames to get sent over HTTP/2 connections when duplicated easy
+ handles were used.
+
+ Reported-by: Daniel Silverstone
+ Fixes #4303
+ Closes #4442
+
+- urlapi: fix use-after-free bug
+
+ Follow-up from 2c20109a9b5d04
+
+ Added test 663 to verify.
+
+ Reported by OSS-Fuzz
+ Bug: https://crbug.com/oss-fuzz/17954
+
+ Closes #4453
+
+- [Paul Dreik brought this change]
+
+ cookie: avoid harmless use after free
+
+ This fix removes a use after free which can be triggered by
+ the internal cookie fuzzer, but otherwise is probably
+ impossible to trigger from an ordinary application.
+
+ The following program reproduces it:
+
+ curl_global_init(CURL_GLOBAL_DEFAULT);
+ CURL* handle=curl_easy_init();
+ CookieInfo* info=Curl_cookie_init(handle,NULL,NULL,false);
+ curl_easy_setopt(handle, CURLOPT_COOKIEJAR, "/dev/null");
+ Curl_flush_cookies(handle, true);
+ Curl_cookie_cleanup(info);
+ curl_easy_cleanup(handle);
+ curl_global_cleanup();
+
+ This was found through fuzzing.
+
+ Closes #4454
+
+- [Denis Chaplygin brought this change]
+
+ docs: add note on failed handles not being counted by curl_multi_perform
+
+ Closes #4446
+
+- CURLMOPT_MAX_CONCURRENT_STREAMS.3: fix SEE ALSO typo
+
+- [Niall brought this change]
+
+ ESNI: initial build/setup
+
+ Closes #4011
+
+- RELEASE-NOTES: synced
+
+- redirect: when following redirects to an absolute URL, URL encode it
+
+  ... to make it handle for example (RFC-violating) embedded spaces.
+
+ Reported-by: momala454 on github
+ Fixes #4445
+ Closes #4447
+
+- urlapi: fix URL encoding when setting a full URL
+
+- tool_operate: rename functions to make more sense
+
+- curl: create easy handles on-demand and not ahead of time
+
+ This should again enable crazy-large download ranges of the style
+ [1-10000000] that otherwise easily ran out of memory starting in 7.66.0
+ when this new handle allocating scheme was introduced.
+
+ Reported-by: Peter Sumatra
+ Fixes #4393
+ Closes #4438
+
+- [Kunal Ekawde brought this change]
+
+ CURLMOPT_MAX_CONCURRENT_STREAMS: new setopt
+
+ Closes #4410
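+
+  A brief, hedged usage sketch of the new option (the value 50 is just an
+  example): it is set on the multi handle and caps the number of
+  concurrent streams curl uses per HTTP/2 connection.
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURLM *multi;
+        curl_global_init(CURL_GLOBAL_DEFAULT);
+        multi = curl_multi_init();
+
+        /* limit concurrent streams per connection */
+        curl_multi_setopt(multi, CURLMOPT_MAX_CONCURRENT_STREAMS, 50L);
+
+        /* ... add easy handles and drive transfers as usual ... */
+
+        curl_multi_cleanup(multi);
+        curl_global_cleanup();
+        return 0;
+      }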
+
+- chunked-encoding: stop hiding the CURLE_BAD_CONTENT_ENCODING error
+
+ Unknown content-encoding would get returned as CURLE_WRITE_ERROR if the
+ response is chunked-encoded.
+
+ Reported-by: Ilya Kosarev
+ Fixes #4310
+ Closes #4449
+
+Marcel Raad (1 Oct 2019)
+- checksrc: fix uninitialized variable warning
+
+ The loop doesn't need to be executed without a file argument.
+
+ Closes https://github.com/curl/curl/pull/4444
+
+- urlapi: fix unused variable warning
+
+ `dest` is only used with `ENABLE_IPV6`.
+
+ Closes https://github.com/curl/curl/pull/4444
+
+- lib: silence conversion warnings
+
+ Closes https://github.com/curl/curl/pull/4444
+
+- AppVeyor: add 32-bit MinGW-w64 build
+
+ With WinSSL and testing enabled so that it would have detected most of
+ the warnings fixed in [0] and [1].
+
+ [0] https://github.com/curl/curl/pull/4398
+ [1] https://github.com/curl/curl/pull/4415
+
+ Closes https://github.com/curl/curl/pull/4433
+
+- AppVeyor: remove MSYS2_ARG_CONV_EXCL for winbuild
+
+ It's only used for MSYS2 with MinGW.
+
+ Closes
+
+Daniel Stenberg (30 Sep 2019)
+- [Emil Engler brought this change]
+
+ git: add tests/server/disabled to .gitignore
+
+ Closes #4441
+
+- altsvc: accept quoted ma and persist values
+
+ As mandated by the spec. Test 1654 is extended to verify.
+
+ Closes #4443
+
+- mailmap: a Lucas fix
+
+Alessandro Ghedini (29 Sep 2019)
+- [Lucas Pardue brought this change]
+
+ quiche: update HTTP/3 config creation to new API
+
+Daniel Stenberg (29 Sep 2019)
+- BINDINGS: PureBasic, Net::Curl for perl and Nim
+
+- BINDINGS: Kapito is an Erlang library, basically a binding
+
+- BINDINGS: added clj-curl
+
+ Reported-by: Lucas Severo
+
+- [Jay Satiro brought this change]
+
+ docs: disambiguate CURLUPART_HOST is for host name (ie no port)
+
+ Closes #4424
+
+- cookies: using a share with cookies shouldn't enable the cookie engine
+
+ The 'share object' only sets the storage area for cookies. The "cookie
+ engine" still needs to be enabled or activated using the normal cookie
+ options.
+
+ This caused the curl command line tool to accidentally use cookies
+ without having been told to, since curl switched to using shared cookies
+ in 7.66.0.
+
+ Test 1166 verifies
+
+ Updated test 506
+
+ Fixes #4429
+ Closes #4434
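+
+  A hedged sketch of the distinction (URL illustrative): sharing
+  CURL_LOCK_DATA_COOKIE only points handles at a common cookie storage;
+  the cookie engine still has to be switched on per handle, for example
+  with an empty CURLOPT_COOKIEFILE.
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURL *curl;
+        CURLSH *share;
+
+        curl_global_init(CURL_GLOBAL_DEFAULT);
+        share = curl_share_init();
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
+
+        curl = curl_easy_init();
+        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+        curl_easy_setopt(curl, CURLOPT_SHARE, share);
+
+        /* sharing alone does not enable cookies; this does: */
+        curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "");
+
+        curl_easy_perform(curl);
+
+        curl_easy_cleanup(curl);
+        curl_share_cleanup(share);
+        curl_global_cleanup();
+        return 0;
+      }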
+
+- setopt: handle ALTSVC set to NULL
+
+- RELEASE-NOTES: synced
+
+- [grdowns brought this change]
+
+ INSTALL: add vcpkg installation instructions
+
+ Closes #4435
+
+- [Zenju brought this change]
+
+ FTP: add test for FTPFILE_NOCWD: Avoid redundant CWDs
+
+ Add libtest 661
+
+ Closes #4417
+
+- [Zenju brought this change]
+
+ FTP: url-decode path before evaluation
+
+ Closes #4428
+
+Marcel Raad (27 Sep 2019)
+- tests: fix narrowing conversion warnings
+
+ `timediff_t` is 64 bits wide also on 32-bit systems since
+ commit b1616dad8f0.
+
+ Closes https://github.com/curl/curl/pull/4415
+
+Jay Satiro (27 Sep 2019)
+- [julian brought this change]
+
+ vtls: Fix comment typo about macosx-version-min compiler flag
+
+ Closes https://github.com/curl/curl/pull/4425
+
+Daniel Stenberg (26 Sep 2019)
+- [Yechiel Kalmenson brought this change]
+
+ README: minor grammar fix
+
+ Closes #4431
+
+- [Spezifant brought this change]
+
+ HTTP3: fix prefix parameter for ngtcp2 build
+
+ Closes #4430
+
+- quiche: don't close connection at end of stream!
+
+- quiche: set 'drain' when returning without having drained the queues
+
+- Revert "FTP: url-decode path before evaluation"
+
+ This reverts commit 2f036a72d543e96128bd75cb0fedd88815fd42e2.
+
+- HTTP3: merged and simplified the two 'running' sections
+
+- HTTP3: show an --alt-svc using example too
+
+- [Zenju brought this change]
+
+ FTP: url-decode path before evaluation
+
+ Closes #4423
+
+- openssl: use strerror on SSL_ERROR_SYSCALL
+
+ Instead of showing the somewhat nonsensical errno number, use strerror()
+ to provide a more relatable error message.
+
+ Closes #4411
+
+- HTTP3: update quic.aiortc.org + add link to server list
+
+ Reported-by: Jeremy Lainé
+
+Jay Satiro (26 Sep 2019)
+- url: don't set appconnect time for non-ssl/non-ssh connections
+
+ Prior to this change non-ssl/non-ssh connections that were reused set
+ TIMER_APPCONNECT [1]. Arguably that was incorrect since no SSL/SSH
+ handshake took place.
+
+ [1]: TIMER_APPCONNECT is publicly known as CURLINFO_APPCONNECT_TIME in
+ libcurl and %{time_appconnect} in the curl tool. It is documented as
+ "the time until the SSL/SSH handshake is completed".
+
+ Reported-by: Marcel Hernandez
+
+ Ref: https://github.com/curl/curl/issues/3760
+
+ Closes https://github.com/curl/curl/pull/3773
+
+Daniel Stenberg (25 Sep 2019)
+- ngtcp2: remove fprintf() calls
+
+ - convert some of them to H3BUF() calls to infof()
+ - remove some of them completely
+ - made DEBUG_HTTP3 defined only if CURLDEBUG is set for now
+
+ Closes #4421
+
+- [Jay Satiro brought this change]
+
+ url: fix the NULL hostname compiler warning case
+
+ Closes #4403
+
+- [Jay Satiro brought this change]
+
+ travis: move the go install to linux-only
+
+ ... to repair the build again
+ Closes #4403
+
+- altsvc: correct the #ifdef for the ngtcp2 backend
+
+- altsvc: save h3 as h3-23
+
+ Follow-up to d176a2c7e5
+
+- urlapi: question mark within fragment is still fragment
+
+  The parser would check for a query part before the fragment, which made
+  it get it wrong when the fragment contains a question mark.
+
+ Extended test 1560 to verify.
+
+ Reported-by: Alex Konev
+ Fixes #4412
+ Closes #4413
+
+- [Alex Samorukov brought this change]
+
+ HTTP3.md: move -p for mkdir, remove -j for make
+
+  - mkdir on OSX/Darwin requires the `-p` argument before the dir
+
+  - portably figuring out the number of cores is an exercise for somewhere
+    else
+
+ Closes #4407
+
+Patrick Monnerat (24 Sep 2019)
+- os400: getpeername() and getsockname() return ebcdic AF_UNIX sockaddrs
+
+ As libcurl now uses these 2 system functions, wrappers are needed on os400
+ to convert returned AF_UNIX sockaddrs to ascii.
+
+ This is a follow-up to commit 7fb54ef.
+ See also #4037.
+ Closes #4214
+
+Jay Satiro (24 Sep 2019)
+- [Lucas Pardue brought this change]
+
+ strcase: fix raw lowercasing the letter X
+
+  Due to a casing mistake in Curl_raw_tolower, 'X' wasn't lowercased as
+  'x' prior to this change.
+
+ Follow-up to 0023fce which added the function several days ago.
+
+ Ref: https://github.com/curl/curl/pull/4401#discussion_r327396546
+
+ Closes https://github.com/curl/curl/pull/4408
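+
+  The gist of a locale-independent ASCII lowercasing helper, as a hedged
+  sketch using a range check; curl's actual implementation maps each
+  uppercase letter explicitly, which is how a single missing case ('X')
+  could slip through.
+
+      #include <stdio.h>
+
+      /* locale-independent ASCII lowercasing */
+      char raw_tolower_sketch(char in)
+      {
+        if(in >= 'A' && in <= 'Z')
+          return (char)(in - 'A' + 'a');
+        return in;
+      }
+
+      int main(void)
+      {
+        /* prints "x!" */
+        printf("%c%c\n", raw_tolower_sketch('X'), raw_tolower_sketch('!'));
+        return 0;
+      }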
+
+Daniel Stenberg (23 Sep 2019)
+- http2: Expression 'stream->stream_id != - 1' is always true
+
+ PVS-Studio warning
+ Fixes #4402
+
+- http2: A value is being subtracted from the unsigned variable
+
+ PVS-Studio warning
+ Fixes #4402
+
+- libssh: part of conditional expression is always true: !result
+
+ PVS-Studio warning
+ Fixed #4402
+
+- libssh: part of conditional expression is always true
+
+ PVS-Studio warning
+ Fixes #4402
+
+- libssh: The expression is excessive or contains a misprint
+
+ PVS-Studio warning
+ Fixes #4402
+
+- quiche: The expression must be surrounded by parentheses
+
+ PVS-Studio warning
+ Fixes #4402
+
+- vauth: The parameter 'status' must be surrounded by parentheses
+
+ PVS-Studio warning
+ Fixes #4402
+
+- [Paul Dreik brought this change]
+
+ doh: allow only http and https in debug mode
+
+ Otherwise curl may be told to use for instance pop3 to
+ communicate with the doh server, which most likely
+ is not what you want.
+
+ Found through fuzzing.
+
+ Closes #4406
+
+- [Paul Dreik brought this change]
+
+ doh: return early if there is no time left
+
+ Closes #4406
+
+- [Barry Pollard brought this change]
+
+ http: lowercase headernames for HTTP/2 and HTTP/3
+
+ Closes #4401
+ Fixes #4400
+
+Marcel Raad (23 Sep 2019)
+- vtls: fix narrowing conversion warnings
+
+ Curl_timeleft returns `timediff_t`, which is 64 bits wide also on
+ 32-bit systems since commit b1616dad8f0.
+
+ Closes https://github.com/curl/curl/pull/4398
+
+Daniel Stenberg (23 Sep 2019)
+- [Joel Depooter brought this change]
+
+ winbuild: Add manifest to curl.exe for proper OS version detection
+
+ This is a small fix to commit ebd213270a017a6830928ee2e1f4a9cabc799898
+ in pull request #1221. That commit added the CURL_EMBED_MANIFEST flag to
+ CURL_RC_FLAGS. However, later in the file CURL_RC_FLAGS is
+ overwritten. The fix is to append values to CURL_RC_FLAGS instead of
+  overwriting it.
+
+ Closes #4399
+
+- RELEASE-NOTES: synced
+
+Marcel Raad (22 Sep 2019)
+- openssl: fix compiler warning with LibreSSL
+
+ It was already fixed for BoringSSL in commit a0f8fccb1e0.
+ LibreSSL has had the second argument to SSL_CTX_set_min_proto_version
+ as uint16_t ever since the function was added in [0].
+
+ [0] https://github.com/libressl-portable/openbsd/commit/56f107201baefb5533486d665a58d8f57fd3aeda
+
+ Closes https://github.com/curl/curl/pull/4397
+
+Daniel Stenberg (22 Sep 2019)
+- curl: exit the create_transfers loop on errors
+
+ When looping around the ranges and given URLs to create transfers, all
+ errors should exit the loop and return. Previously it would keep
+ looping.
+
+ Reported-by: SumatraPeter on github
+ Bug: #4393
+ Closes #4396
+
+Jay Satiro (21 Sep 2019)
+- socks: Fix destination host shown on SOCKS5 error
+
+  Prior to this change, when a server returned a socks5 connect error,
+  curl would parse the destination address:port from that data and show it
+  to the user as the destination:
+
+ curld -v --socks5 10.0.3.1:1080 http://google.com:99
+ * SOCKS5 communication to google.com:99
+ * SOCKS5 connect to IPv4 172.217.12.206 (locally resolved)
+ * Can't complete SOCKS5 connection to 253.127.0.0:26673. (1)
+ curl: (7) Can't complete SOCKS5 connection to 253.127.0.0:26673. (1)
+
+ That's incorrect because the address:port included in the connect error
+ is actually a bind address:port (typically unused) and not the
+ destination address:port. This fix changes curl to show the destination
+ information that curl sent to the server instead:
+
+ curld -v --socks5 10.0.3.1:1080 http://google.com:99
+ * SOCKS5 communication to google.com:99
+ * SOCKS5 connect to IPv4 172.217.7.14:99 (locally resolved)
+ * Can't complete SOCKS5 connection to 172.217.7.14:99. (1)
+ curl: (7) Can't complete SOCKS5 connection to 172.217.7.14:99. (1)
+
+ curld -v --socks5-hostname 10.0.3.1:1080 http://google.com:99
+ * SOCKS5 communication to google.com:99
+ * SOCKS5 connect to google.com:99 (remotely resolved)
+ * Can't complete SOCKS5 connection to google.com:99. (1)
+ curl: (7) Can't complete SOCKS5 connection to google.com:99. (1)
+
+ Ref: https://tools.ietf.org/html/rfc1928#section-6
+
+ Closes https://github.com/curl/curl/pull/4394
+
+Daniel Stenberg (21 Sep 2019)
+- travis: enable ngtcp2 h3-23 builds
+
+- altsvc: both backends run h3-23 now
+
+ Closes #4395
+
+- http: fix warning on conversion from int to bit
+
+ Follow-up from 03ebe66d70
+
+- urldata: use 'bool' for the bit type on MSVC compilers
+
+ Closes #4387
+ Fixes #4379
+
+- appveyor: upgrade VS2017 to VS2019
+
+ Closes #4383
+
+- [Zenju brought this change]
+
+ FTP: FTPFILE_NOCWD: avoid redundant CWDs
+
+ Closes #4382
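+
+  A hedged sketch of the option this optimization concerns (URL
+  illustrative): selecting the "nocwd" method makes libcurl address files
+  by their full path instead of issuing one CWD per directory component.
+
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURL *curl;
+        curl_global_init(CURL_GLOBAL_DEFAULT);
+        curl = curl_easy_init();
+
+        curl_easy_setopt(curl, CURLOPT_URL,
+                         "ftp://example.com/a/b/c/file.txt");
+        /* skip per-directory CWD commands */
+        curl_easy_setopt(curl, CURLOPT_FTP_FILEMETHOD,
+                         (long)CURLFTPMETHOD_NOCWD);
+
+        curl_easy_perform(curl);
+
+        curl_easy_cleanup(curl);
+        curl_global_cleanup();
+        return 0;
+      }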
+
+- cookie: pass in the correct cookie amount to qsort()
+
+  Needed since the loop discards cookies without a domain set. This bug
+  would lead to qsort() trying to sort uninitialized pointers. We have
+  however not found it to be a security problem.
+
+ Reported-by: Paul Dreik
+ Closes #4386
+
+- [Paul Dreik brought this change]
+
+ urlapi: avoid index underflow for short ipv6 hostnames
+
+  If the input hostname is "[", hlen will underflow to the max of size_t
+  when 2 is subtracted from it.
+
+ hostname[hlen] will then cause a warning by ubsanitizer:
+
+ runtime error: addition of unsigned offset to 0x<snip> overflowed to
+ 0x<snip>
+
+ I think that in practice, the generated code will work, and the output
+ of hostname[hlen] will be the first character "[".
+
+ This can be demonstrated by the following program (tested in both clang
+ and gcc, with -O3)
+
+ int main() {
+ char* hostname=strdup("[");
+ size_t hlen = strlen(hostname);
+
+ hlen-=2;
+ hostname++;
+ printf("character is %d\n",+hostname[hlen]);
+ free(hostname-1);
+ }
+
+ I found this through fuzzing, and even if it seems harmless, the proper
+ thing is to return early with an error.
+
+ Closes #4389
+
+- [Tatsuhiro Tsujikawa brought this change]
+
+ ngtcp2: compile with latest ngtcp2 + nghttp3 draft-23
+
+ Closes #4392
+
+- THANKS-filter: deal with my typos 'Jat' => 'Jay'
+
+- travis: use go master
+
+ ... as the boringssl builds needs a very recent version
+
+ Co-authored-by: Jat Satiro
+ Closes #4361
+
+- tool_operate: removed unused variable 'done'
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- tool_operate: Expression 'config->resume_from' is always true
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- tool_getparam: remove duplicate switch case
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- libssh2: part of conditional expression is always true: !result
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- urlapi: Expression 'storep' is always true
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- urlapi: 'scheme' is always true
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- urlapi: part of conditional expression is always true: (relurl[0] == '/')
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- setopt: store CURLOPT_RTSP_SERVER_CSEQ correctly
+
+ Fixes bug detected by PVS-Studio
+ Fixes #4374
+
+- mime: make Curl_mime_duppart() assert if called without valid dst
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- http_proxy: part of conditional expression is always true: !error
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- imap: merged two case-branches performing the same action
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- multi: value '2L' is assigned to a boolean
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- easy: part of conditional expression is always true: !result
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- netrc: part of conditional expression is always true: !done
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- version: Expression 'left > 1' is always true
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- url: remove dead code
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- url: part of expression is always true: (bundle->multiuse == 0)
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- ftp: the conditional expression is always true
+
+ ... both !result and (ftp->transfer != FTPTRANSFER_BODY)!
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- ftp: Expression 'ftpc->wait_data_conn' is always false
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- ftp: Expression 'ftpc->wait_data_conn' is always true
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- ftp: part of conditional expression is always true: !result
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+
+- http: fix Expression 'http->postdata' is always false
+
+ Fixes warning detected by PVS-Studio
+ Fixes #4374
+ Reported-by: Valerii Zapodovnikov
+
+- [Niall O'Reilly brought this change]
+
+ doh: avoid truncating DNS QTYPE to lower octet
+
+ Closes #4381
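+
+  In plain terms, and as a hedged generic sketch rather than the doh.c
+  code: a DNS record type is a 16-bit big-endian field, so both bytes must
+  contribute to the value instead of only the low octet.
+
+      #include <stdio.h>
+
+      /* read a 16-bit big-endian value from a DNS message buffer */
+      unsigned short get16(const unsigned char *p)
+      {
+        return (unsigned short)((p[0] << 8) | p[1]);
+      }
+
+      int main(void)
+      {
+        const unsigned char qtype[] = { 0x01, 0x01 };  /* 257 == CAA */
+        /* truncating to the low octet would give 1 (A) instead of 257 */
+        printf("qtype = %u\n", get16(qtype));
+        return 0;
+      }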
+
+- [Jens Finkhaeuser brought this change]
+
+ urlapi: CURLU_NO_AUTHORITY allows empty authority/host part
+
+ CURLU_NO_AUTHORITY is intended for use with unknown schemes (i.e. not
+ "file:///") to override cURL's default demand that an authority exists.
+
+ Closes #4349
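+
+  A hedged sketch with the URL API (the scheme here is made up): combined
+  with CURLU_NON_SUPPORT_SCHEME, the new flag lets a URL with an empty
+  authority/host part parse instead of being rejected.
+
+      #include <stdio.h>
+      #include <curl/curl.h>
+
+      int main(void)
+      {
+        CURLU *u = curl_url();
+        CURLUcode rc = curl_url_set(u, CURLUPART_URL,
+                                    "example-scheme:///path/to/resource",
+                                    CURLU_NON_SUPPORT_SCHEME |
+                                    CURLU_NO_AUTHORITY);
+        if(!rc) {
+          char *path = NULL;
+          curl_url_get(u, CURLUPART_PATH, &path, 0);
+          printf("path: %s\n", path);  /* expect "/path/to/resource" */
+          curl_free(path);
+        }
+        curl_url_cleanup(u);
+        return 0;
+      }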
+
+- version: next release will be 7.67.0
+
+- RELEASE-NOTES: synced
+
+- url: only reuse TLS connections with matching pinning
+
+ If the requests have different CURLOPT_PINNEDPUBLICKEY strings set, the
+ connection should not be reused.
+
+ Bug: https://curl.haxx.se/mail/lib-2019-09/0061.html
+ Reported-by: Sebastian Haglund
+
+ Closes #4347
+
+- README: add OSS-Fuzz badge [skip ci]
+
+ Closes #4380
+
+Michael Kaufmann (18 Sep 2019)
+- http: merge two "case" statements
+
+Daniel Stenberg (18 Sep 2019)
+- [Zenju brought this change]
+
+ FTP: remove trailing slash from path for LIST/MLSD
+
+ Closes #4348
+
+- mime: when disabled, avoid C99 macro
+
+ Closes #4368
+
+- url: cleanup dangling DOH request headers too
+
+ Follow-up to 9bc44ff64d9081
+
+ Credit to OSS-Fuzz
+ Bug: https://crbug.com/oss-fuzz/17269
+
+ Closes #4372
+
+- [Christoph M. Becker brought this change]
+
+ http2: relax verification of :authority in push promise requests
+
+ If the :authority pseudo header field doesn't contain an explicit port,
+ we assume it is valid for the default port, instead of rejecting the
+ request for all ports.
+
+ Ref: https://curl.haxx.se/mail/lib-2019-09/0041.html
+
+ Closes #4365
+
+- doh: clean up dangling DOH handles and memory on easy close
+
+ If you set the same URL for target as for DoH (and it isn't a DoH
+ server), like "https://example.com" in both, the easy handles used for
+ the DoH requests could be left "dangling" and end up not getting freed.
+
+ Reported-by: Paul Dreik
+ Closes #4366
+
+- unit1655: make it C90 compliant
+
+ Unclear why this was not detected in the CI.
+
+ Follow-up to b7666027296a
+
+- smb: check for full size message before reading message details
+
+ To avoid reading of uninitialized data.
+
+ Assisted-by: Max Dymond
+ Bug: https://crbug.com/oss-fuzz/16907
+ Closes #4363
+
+- quiche: persist connection details
+
+ ... like we do for other protocols at connect time. This makes "curl -I"
+ and other things work.
+
+ Reported-by: George Liu
+ Fixes #4358
+ Closes #4360
+
+- openssl: fix warning with boringssl and SSL_CTX_set_min_proto_version
+
+ Follow-up to ffe34b7b59
+ Closes #4359
+
+- [Paul Dreik brought this change]
+
+ doh: fix undefined behaviour and open up for gcc and clang optimization
+
+ The undefined behaviour is annoying when running fuzzing with
+ sanitizers. The codegen is the same, but the meaning is now not up for
+ dispute. See https://cppinsights.io/s/516a2ff4
+
+ By incrementing the pointer first, both gcc and clang recognize this as
+ a bswap and optimizes it to a single instruction. See
+ https://godbolt.org/z/994Zpx
+
+ Closes #4350
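+
+  A hedged, generic sketch of the technique (not the doh.c function
+  itself): assembling the 32-bit value from individual bytes with
+  well-defined unsigned shifts lets gcc and clang recognize the pattern
+  and emit a single byte-swapped load.
+
+      #include <stdio.h>
+
+      /* read a 32-bit big-endian value byte by byte */
+      unsigned int get32be(const unsigned char *p)
+      {
+        return ((unsigned int)p[0] << 24) |
+               ((unsigned int)p[1] << 16) |
+               ((unsigned int)p[2] << 8) |
+               (unsigned int)p[3];
+      }
+
+      int main(void)
+      {
+        const unsigned char ttl[] = { 0x00, 0x00, 0x0e, 0x10 };
+        printf("%u\n", get32be(ttl));   /* prints 3600 */
+        return 0;
+      }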
+
+- [Paul Dreik brought this change]
+
+ doh: fix (harmless) buffer overrun
+
+ Added unit test case 1655 to verify.
+ Close #4352
+
+  The test code correctly finds the flaws in the old code
+  if one temporarily restores doh.c to the old version.
+
+Alessandro Ghedini (15 Sep 2019)
+- docs: remove trailing ':' from section names in CURLOPT_TRAILER* man
+
+- docs: fix typo in CURLOPT_HTTP_VERSION man
+
+GitHub (14 Sep 2019)
+- [Daniel Stenberg brought this change]
+
+  CI: initial github action job
+
+ First shot at a CI build on github actions
+
+Daniel Stenberg (13 Sep 2019)
+- appveyor: add a winbuild
+
+ Assisted-by: Marcel Raad
+ Assisted-by: Jay Satiro
+
+ Closes #4324
+
+- FTP: allow "rubbish" prepended to the SIZE response
+
+ This is a protocol violation but apparently there are legacy proprietary
+ servers doing this.
+
+  Added tests 336 and 337 to verify.
+
+ Reported-by: Philippe Marguinaud
+ Closes #4339
+
+- [Zenju brought this change]
+
+ FTP: skip CWD to entry dir when target is absolute
+
+ Closes #4332
+
+Kamil Dudka (13 Sep 2019)
+- curl: fix memory leaked by parse_metalink()
+
+ This commit fixes a regression introduced by curl-7_65_3-5-gb88940850.
+ Detected by tests 2005, 2008, 2009, 2010, 2011, and 2012 with valgrind
+ and libmetalink enabled.
+
+ Closes #4326
+
+Daniel Stenberg (13 Sep 2019)
+- parsedate: still provide the name arrays when disabled
+
+ If FILE or FTP are enabled, since they also use them!
+
+ Reported-by: Roland Hieber
+ Fixes #4325
+ Closes #4343
+
+- [Gilles Vollant brought this change]
+
+ curl:file2string: load large files much faster
+
+ ... by using a more efficient realloc scheme.
+
+ Bug: https://curl.haxx.se/mail/lib-2019-09/0045.html
+ Closes #4336
+
+- openssl: close_notify on the FTP data connection doesn't mean closure
+
+ For FTPS transfers, curl gets close_notify on the data connection
+ without that being a signal to close the control connection!
+
+ Regression since 3f5da4e59a556fc (7.65.0)
+
+ Reported-by: Zenju on github
+ Reviewed-by: Jay Satiro
+ Fixes #4329
+ Closes #4340
+
+- [Jimmy Gaussen brought this change]
+
+ docs/HTTP3: fix `--with-ssl` ngtcp2 configure flag
+
+ Closes #4338
+
+- RELEASE-NOTES: synced
+
+- curlver: bump to 7.66.1
+
+- [Zenju brought this change]
+
+ setopt: make it easier to add new enum values
+
+ ... by using the *_LAST define names better.
+
+ Closes #4321
+
+- asyn-thread: s/AF_LOCAL/AF_UNIX for Solaris
+
+ Reported-by: Dagobert Michelsen
+ Fixes #4328
+ Closes #4333
+
+- [Bernhard Walle brought this change]
+
+ winbuild/MakefileBuild.vc: Add vssh
+
+ Without that modification, the Windows build using the makefiles doesn't
+ work.
+
+ Signed-off-by: Bernhard Walle <bernhard.walle@posteo.eu>
+
+ Fixes #4322
+ Closes #4323
+
+Bernhard Walle (11 Sep 2019)
+- winbuild/MakefileBuild.vc: Fix line endings
+
+ The file had mixed line endings.
+
+ Signed-off-by: Bernhard Walle <bernhard.walle@posteo.eu>
+
+Jay Satiro (11 Sep 2019)
+- ldap: Stop using wide char version of ldap_err2string
+
+  Despite ldap_err2string being documented by MS as returning a
+ PCHAR (char *), when UNICODE it is mapped to ldap_err2stringW and
+ returns PWCHAR (wchar_t *).
+
+ We have lots of code that expects ldap_err2string to return char *,
+  most of it in failf calls like this:
+
+ failf(data, "LDAP local: Some error: %s", ldap_err2string(rc));
+
+ Closes https://github.com/curl/curl/pull/4272
+
Version 7.66.0 (10 Sep 2019)
Daniel Stenberg (10 Sep 2019)
@@ -5598,2028 +7097,3 @@ Alessandro Ghedini (7 Feb 2019)
% curl -E <TAB>
Bug: https://bugs.debian.org/921452
-
-- zsh.pl: update regex to better match curl -h output
-
- The current regex fails to match '<...>' arguments properly (e.g. those
- with spaces in them), which causes a completion script with wrong
- descriptions for some options.
-
- Here's a diff of the generated completion script, comparing the previous
- version to the one with this fix:
-
- --- /usr/share/zsh/vendor-completions/_curl 2019-01-15 20:47:40.000000000 +0000
- +++ _curl 2019-02-05 20:57:29.453349040 +0000
- @@ -9,48 +9,48 @@
-
- _arguments -C -S \
- --happy-eyeballs-timeout-ms'[How long to wait in milliseconds for IPv6 before trying IPv4]':'<milliseconds>' \
- + --resolve'[Resolve the host+port to this address]':'<host:port:address[,address]...>' \
- {-c,--cookie-jar}'[Write cookies to <filename> after operation]':'<filename>':_files \
- {-D,--dump-header}'[Write the received headers to <filename>]':'<filename>':_files \
- {-y,--speed-time}'[Trigger '\''speed-limit'\'' abort after this time]':'<seconds>' \
- --proxy-cacert'[CA certificate to verify peer against for proxy]':'<file>':_files \
- - --tls13-ciphers'[of TLS 1.3 ciphersuites> TLS 1.3 cipher suites to use]':'<list' \
- + --tls13-ciphers'[TLS 1.3 cipher suites to use]':'<list of TLS 1.3 ciphersuites>' \
- {-E,--cert}'[Client certificate file and password]':'<certificate[:password]>' \
- --libcurl'[Dump libcurl equivalent code of this command line]':'<file>':_files \
- --proxy-capath'[CA directory to verify peer against for proxy]':'<dir>':_files \
- - --proxy-negotiate'[HTTP Negotiate (SPNEGO) authentication on the proxy]':'Use' \
- --proxy-pinnedpubkey'[FILE/HASHES public key to verify proxy with]':'<hashes>' \
- --crlfile'[Get a CRL list in PEM format from the given file]':'<file>':_files \
- - --proxy-insecure'[HTTPS proxy connections without verifying the proxy]':'Do' \
- - --proxy-ssl-allow-beast'[security flaw for interop for HTTPS proxy]':'Allow' \
- + --proxy-negotiate'[Use HTTP Negotiate (SPNEGO) authentication on the proxy]' \
- --abstract-unix-socket'[Connect via abstract Unix domain socket]':'<path>' \
- --pinnedpubkey'[FILE/HASHES Public key to verify peer against]':'<hashes>' \
- + --proxy-insecure'[Do HTTPS proxy connections without verifying the proxy]' \
- --proxy-pass'[Pass phrase for the private key for HTTPS proxy]':'<phrase>' \
- + --proxy-ssl-allow-beast'[Allow security flaw for interop for HTTPS proxy]' \
- {-p,--proxytunnel}'[Operate through an HTTP proxy tunnel (using CONNECT)]' \
- --socks5-hostname'[SOCKS5 proxy, pass host name to proxy]':'<host[:port]>' \
- --proto-default'[Use PROTOCOL for any URL missing a scheme]':'<protocol>' \
- - --proxy-tls13-ciphers'[list> TLS 1.3 proxy cipher suites]':'<ciphersuite' \
- + --proxy-tls13-ciphers'[TLS 1.3 proxy cipher suites]':'<ciphersuite list>' \
- --socks5-gssapi-service'[SOCKS5 proxy service name for GSS-API]':'<name>' \
- --ftp-alternative-to-user'[String to replace USER \[name\]]':'<command>' \
- - --ftp-ssl-control'[SSL/TLS for FTP login, clear for transfer]':'Require' \
- {-T,--upload-file}'[Transfer local FILE to destination]':'<file>':_files \
- --local-port'[Force use of RANGE for local port numbers]':'<num/range>' \
- --proxy-tlsauthtype'[TLS authentication type for HTTPS proxy]':'<type>' \
- {-R,--remote-time}'[Set the remote file'\''s time on the local output]' \
- - --retry-connrefused'[on connection refused (use with --retry)]':'Retry' \
- - --suppress-connect-headers'[proxy CONNECT response headers]':'Suppress' \
- - {-j,--junk-session-cookies}'[session cookies read from file]':'Ignore' \
- - --location-trusted'[--location, and send auth to other hosts]':'Like' \
- + --ftp-ssl-control'[Require SSL/TLS for FTP login, clear for transfer]' \
- --proxy-cert-type'[Client certificate type for HTTPS proxy]':'<type>' \
- {-O,--remote-name}'[Write output to a file named as the remote file]' \
- + --retry-connrefused'[Retry on connection refused (use with --retry)]' \
- + --suppress-connect-headers'[Suppress proxy CONNECT response headers]' \
- --trace-ascii'[Like --trace, but without hex output]':'<file>':_files \
- --connect-timeout'[Maximum time allowed for connection]':'<seconds>' \
- --expect100-timeout'[How long to wait for 100-continue]':'<seconds>' \
- {-g,--globoff}'[Disable URL sequences and ranges using {} and \[\]]' \
- + {-j,--junk-session-cookies}'[Ignore session cookies read from file]' \
- {-m,--max-time}'[Maximum time allowed for the transfer]':'<seconds>' \
- --dns-ipv4-addr'[IPv4 address to use for DNS requests]':'<address>' \
- --dns-ipv6-addr'[IPv6 address to use for DNS requests]':'<address>' \
- - --ignore-content-length'[the size of the remote resource]':'Ignore' \
- {-k,--insecure}'[Allow insecure server connections when using SSL]' \
- + --location-trusted'[Like --location, and send auth to other hosts]' \
- --mail-auth'[Originator address of the original email]':'<address>' \
- --noproxy'[List of hosts which do not use proxy]':'<no-proxy-list>' \
- --proto-redir'[Enable/disable PROTOCOLS on redirect]':'<protocols>' \
- @@ -62,18 +62,19 @@
- --socks5-basic'[Enable username/password auth for SOCKS5 proxies]' \
- --cacert'[CA certificate to verify peer against]':'<file>':_files \
- {-H,--header}'[Pass custom header(s) to server]':'<header/@file>' \
- + --ignore-content-length'[Ignore the size of the remote resource]' \
- {-i,--include}'[Include protocol response headers in the output]' \
- --proxy-header'[Pass custom header(s) to proxy]':'<header/@file>' \
- --unix-socket'[Connect through this Unix domain socket]':'<path>' \
- {-w,--write-out}'[Use output FORMAT after completion]':'<format>' \
- - --http2-prior-knowledge'[HTTP 2 without HTTP/1.1 Upgrade]':'Use' \
- {-o,--output}'[Write to file instead of stdout]':'<file>':_files \
- - {-J,--remote-header-name}'[the header-provided filename]':'Use' \
- + --preproxy'[\[protocol://\]host\[:port\] Use this proxy first]' \
- --socks4a'[SOCKS4a proxy on given host + port]':'<host[:port]>' \
- {-Y,--speed-limit}'[Stop transfers slower than this]':'<speed>' \
- {-z,--time-cond}'[Transfer based on a time condition]':'<time>' \
- --capath'[CA directory to verify peer against]':'<dir>':_files \
- {-f,--fail}'[Fail silently (no output at all) on HTTP errors]' \
- + --http2-prior-knowledge'[Use HTTP 2 without HTTP/1.1 Upgrade]' \
- --proxy-tlspassword'[TLS password for HTTPS proxy]':'<string>' \
- {-U,--proxy-user}'[Proxy user and password]':'<user:password>' \
- --proxy1.0'[Use HTTP/1.0 proxy on given port]':'<host[:port]>' \
- @@ -81,52 +82,49 @@
- {-A,--user-agent}'[Send User-Agent <name> to server]':'<name>' \
- --egd-file'[EGD socket path for random data]':'<file>':_files \
- --fail-early'[Fail on first transfer error, do not continue]' \
- - --haproxy-protocol'[HAProxy PROXY protocol v1 header]':'Send' \
- - --preproxy'[Use this proxy first]':'[protocol://]host[:port]' \
- + {-J,--remote-header-name}'[Use the header-provided filename]' \
- --retry-max-time'[Retry only within this period]':'<seconds>' \
- --socks4'[SOCKS4 proxy on given host + port]':'<host[:port]>' \
- --socks5'[SOCKS5 proxy on given host + port]':'<host[:port]>' \
- - --socks5-gssapi-nec'[with NEC SOCKS5 server]':'Compatibility' \
- - --ssl-allow-beast'[security flaw to improve interop]':'Allow' \
- --cert-status'[Verify the status of the server certificate]' \
- - --ftp-create-dirs'[the remote dirs if not present]':'Create' \
- {-:,--next}'[Make next URL use its separate set of options]' \
- --proxy-key-type'[Private key file type for proxy]':'<type>' \
- - --remote-name-all'[the remote file name for all URLs]':'Use' \
- {-X,--request}'[Specify request command to use]':'<command>' \
- --retry'[Retry request if transient problems occur]':'<num>' \
- - --ssl-no-revoke'[cert revocation checks (WinSSL)]':'Disable' \
- --cert-type'[Certificate file type (DER/PEM/ENG)]':'<type>' \
- --connect-to'[Connect to host]':'<HOST1:PORT1:HOST2:PORT2>' \
- --create-dirs'[Create necessary local directory hierarchy]' \
- + --haproxy-protocol'[Send HAProxy PROXY protocol v1 header]' \
- --max-redirs'[Maximum number of redirects allowed]':'<num>' \
- {-n,--netrc}'[Must read .netrc for user name and password]' \
- + {-x,--proxy}'[\[protocol://\]host\[:port\] Use this proxy]' \
- --proxy-crlfile'[Set a CRL list for proxy]':'<file>':_files \
- --sasl-ir'[Enable initial response in SASL authentication]' \
- - --socks5-gssapi'[GSS-API auth for SOCKS5 proxies]':'Enable' \
- + --socks5-gssapi-nec'[Compatibility with NEC SOCKS5 server]' \
- + --ssl-allow-beast'[Allow security flaw to improve interop]' \
- + --ftp-create-dirs'[Create the remote dirs if not present]' \
- --interface'[Use network INTERFACE (or address)]':'<name>' \
- --key-type'[Private key file type (DER/PEM/ENG)]':'<type>' \
- --netrc-file'[Specify FILE for netrc]':'<filename>':_files \
- {-N,--no-buffer}'[Disable buffering of the output stream]' \
- --proxy-service-name'[SPNEGO proxy service name]':'<name>' \
- - --styled-output'[styled output for HTTP headers]':'Enable' \
- + --remote-name-all'[Use the remote file name for all URLs]' \
- + --ssl-no-revoke'[Disable cert revocation checks (WinSSL)]' \
- --max-filesize'[Maximum file size to download]':'<bytes>' \
- --negotiate'[Use HTTP Negotiate (SPNEGO) authentication]' \
- --no-keepalive'[Disable TCP keepalive on the connection]' \
- {-#,--progress-bar}'[Display transfer progress as a bar]' \
- - {-x,--proxy}'[Use this proxy]':'[protocol://]host[:port]' \
- - --proxy-anyauth'[any proxy authentication method]':'Pick' \
- {-Q,--quote}'[Send command(s) to server before transfer]' \
- - --request-target'[the target for this request]':'Specify' \
- + --socks5-gssapi'[Enable GSS-API auth for SOCKS5 proxies]' \
- {-u,--user}'[Server user and password]':'<user:password>' \
- {-K,--config}'[Read config from a file]':'<file>':_files \
- {-C,--continue-at}'[Resumed transfer offset]':'<offset>' \
- --data-raw'[HTTP POST data, '\''@'\'' allowed]':'<data>' \
- - --disallow-username-in-url'[username in url]':'Disallow' \
- --krb'[Enable Kerberos with security <level>]':'<level>' \
- --proxy-ciphers'[SSL ciphers to use for proxy]':'<list>' \
- --proxy-digest'[Use Digest authentication on the proxy]' \
- --proxy-tlsuser'[TLS username for HTTPS proxy]':'<name>' \
- + --styled-output'[Enable styled output for HTTP headers]' \
- {-b,--cookie}'[Send cookies from string/file]':'<data>' \
- --data-urlencode'[HTTP POST data url encoded]':'<data>' \
- --delegation'[GSS-API delegation permission]':'<LEVEL>' \
- @@ -134,7 +132,10 @@
- --post301'[Do not switch to GET after following a 301]' \
- --post302'[Do not switch to GET after following a 302]' \
- --post303'[Do not switch to GET after following a 303]' \
- + --proxy-anyauth'[Pick any proxy authentication method]' \
- + --request-target'[Specify the target for this request]' \
- --trace-time'[Add time stamps to trace/verbose output]' \
- + --disallow-username-in-url'[Disallow username in url]' \
- --dns-servers'[DNS server addrs to use]':'<addresses>' \
- {-G,--get}'[Put the post data in the URL and use GET]' \
- --limit-rate'[Limit transfer speed to RATE]':'<speed>' \
- @@ -148,21 +149,21 @@
- --metalink'[Process given URLs as metalink XML file]' \
- --tr-encoding'[Request compressed transfer encoding]' \
- --xattr'[Store metadata in extended file attributes]' \
- - --ftp-skip-pasv-ip'[the IP address for PASV]':'Skip' \
- --pass'[Pass phrase for the private key]':'<phrase>' \
- --proxy-ntlm'[Use NTLM authentication on the proxy]' \
- {-S,--show-error}'[Show error even when -s is used]' \
- - --ciphers'[of ciphers> SSL ciphers to use]':'<list' \
- + --ciphers'[SSL ciphers to use]':'<list of ciphers>' \
- --form-string'[Specify multipart MIME data]':'<name=string>' \
- --login-options'[Server login options]':'<options>' \
- --tftp-blksize'[Set TFTP BLKSIZE option]':'<value>' \
- - --tftp-no-options'[not send any TFTP options]':'Do' \
- {-v,--verbose}'[Make the operation more talkative]' \
- + --ftp-skip-pasv-ip'[Skip the IP address for PASV]' \
- --proxy-key'[Private key for HTTPS proxy]':'<key>' \
- {-F,--form}'[Specify multipart MIME data]':'<name=content>' \
- --mail-from'[Mail from this address]':'<address>' \
- --oauth2-bearer'[OAuth 2 Bearer Token]':'<token>' \
- --proto'[Enable/disable PROTOCOLS]':'<protocols>' \
- + --tftp-no-options'[Do not send any TFTP options]' \
- --tlsauthtype'[TLS authentication type]':'<type>' \
- --doh-url'[Resolve host names over DOH]':'<URL>' \
- --no-sessionid'[Disable SSL session-ID reusing]' \
- @@ -173,14 +174,13 @@
- --ftp-ssl-ccc'[Send CCC after authenticating]' \
- {-4,--ipv4}'[Resolve names to IPv4 addresses]' \
- {-6,--ipv6}'[Resolve names to IPv6 addresses]' \
- - --netrc-optional'[either .netrc or URL]':'Use' \
- --service-name'[SPNEGO service name]':'<name>' \
- {-V,--version}'[Show version number and quit]' \
- --data-ascii'[HTTP POST ASCII data]':'<data>' \
- --ftp-account'[Account data string]':'<data>' \
- - --compressed-ssh'[SSH compression]':'Enable' \
- --disable-eprt'[Inhibit using EPRT or LPRT]' \
- --ftp-method'[Control CWD usage]':'<method>' \
- + --netrc-optional'[Use either .netrc or URL]' \
- --pubkey'[SSH Public key file name]':'<key>' \
- --raw'[Do HTTP "raw"; no transfer decoding]' \
- --anyauth'[Pick any authentication method]' \
- @@ -189,6 +189,7 @@
- --no-alpn'[Disable the ALPN TLS extension]' \
- --tcp-nodelay'[Use the TCP_NODELAY option]' \
- {-B,--use-ascii}'[Use ASCII/text transfer]' \
- + --compressed-ssh'[Enable SSH compression]' \
- --digest'[Use HTTP Digest Authentication]' \
- --proxy-tlsv1'[Use TLSv1 for HTTPS proxy]' \
- --engine'[Crypto engine to use]':'<name>' \
-
-Marcel Raad (7 Feb 2019)
-- tool_operate: fix typecheck warning
-
- Use long for CURLOPT_HTTP09_ALLOWED to fix the following warning:
- tool_operate.c: In function 'operate_do':
- ../include/curl/typecheck-gcc.h:47:9: error: call to
- '_curl_easy_setopt_err_long' declared with attribute warning:
- curl_easy_setopt expects a long argument for this option [-Werror]
-
- Closes https://github.com/curl/curl/pull/3534
-
-Jay Satiro (6 Feb 2019)
-- [Chris Araman brought this change]
-
- url: close TLS before removing conn from cache
-
- - Fix potential crashes in schannel shutdown.
-
- Ensure any TLS shutdown messages are sent before removing the
- association between the connection and the easy handle. Reverts
- @bagder's previous partial fix for #3412.
-
- Fixes https://github.com/curl/curl/issues/3412
- Fixes https://github.com/curl/curl/issues/3505
- Closes https://github.com/curl/curl/pull/3531
-
-Daniel Gustafsson (6 Feb 2019)
-- INTERNALS.md: fix subsection depth and link
-
- The Kerberos subsection was mistakenly a subsubsection under FTP, and
- the curlx subsection was missing an anchor for the TOC link.
-
- Closes #3529
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Version 7.64.0 (6 Feb 2019)
-
-Daniel Stenberg (6 Feb 2019)
-- RELEASE-NOTES: 7.64.0
-
-- RELEASE-PROCEDURE: update the release calendar
-
-- THANKS: 7.64.0 status
-
-Daniel Gustafsson (5 Feb 2019)
-- ROADMAP: remove already performed item
-
- Commit 7a09b52c98ac8d840a8a9907b1a1d9a9e684bcf5 introduced support
- for the draft-ietf-httpbis-cookie-alone-01 cookie draft, and while
- the entry was removed from the TODO it was mistakenly left here.
- Fix by removing and rewording the entry slightly.
-
- Closes #3530
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-- [Etienne Simard brought this change]
-
- CONTRIBUTE.md: Fix grammatical errors
-
- Fix grammatical errors making the document read better. Also fixes
- a typo.
-
- Closes #3525
- Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
-
-Daniel Stenberg (4 Feb 2019)
-- [Julian Z brought this change]
-
- docs: use $(INSTALL_DATA) to install man page
-
- Fixes #3518
- Closes #3522
-
-Jay Satiro (4 Feb 2019)
-- [Ladar Levison brought this change]
-
- runtests.pl: Fix perl call to include srcdir
-
- - Use explicit include opt for perl calls.
-
- Prior to this change some scripts couldn't find their dependencies.
-
- At the top, perl is called with the "-Isrcdir" option, and it
- works:
-
- https://github.com/curl/curl/blob/curl-7_63_0/tests/runtests.pl#L183
-
- But on line 3868, that option is omitted. This caused problems for me,
- as the symbol-scan.pl script in particular couldn't find its
- dependencies properly:
-
- https://github.com/curl/curl/blob/curl-7_63_0/tests/runtests.pl#L3868
-
- This patch fixes that oversight by making calls to perl sub-shells
- uniform.
-
- Closes https://github.com/curl/curl/pull/3496
-
-Daniel Stenberg (4 Feb 2019)
-- [Daniel Gustafsson brought this change]
-
- smtp: avoid risk of buffer overflow in strtol
-
- If the incoming len is 5, but the buffer does not have a termination
- after 5 bytes, the strtol() call may keep reading through the line
- buffer until it exceeds its boundary. Fix by ensuring that we are
- using a bounded read with a temporary buffer on the stack.
-
- Bug: https://curl.haxx.se/docs/CVE-2019-3823.html
- Reported-by: Brian Carpenter (Geeknik Labs)
- CVE-2019-3823
-
-- ntlm: fix *_type3_message size check to avoid buffer overflow
-
- Bug: https://curl.haxx.se/docs/CVE-2019-3822.html
- Reported-by: Wenxiang Qian
- CVE-2019-3822
-
-- NTLM: fix size check condition for type2 received data
-
- Bug: https://curl.haxx.se/docs/CVE-2018-16890.html
- Reported-by: Wenxiang Qian
- CVE-2018-16890
-
-Marcel Raad (1 Feb 2019)
-- [Giorgos Oikonomou brought this change]
-
- spnego_sspi: add support for channel binding
-
- Attempt to add support for Secure Channel binding when negotiate
- authentication is used. The problem to solve is that by default IIS
- accepts channel bindings and curl doesn't utilise them. The result was a
- 401 response. The scope affects only the Schannel (winssl)-SSPI combination.
-
- Fixes https://github.com/curl/curl/issues/3503
- Closes https://github.com/curl/curl/pull/3509
-
-Daniel Stenberg (1 Feb 2019)
-- RELEASE-NOTES: synced
-
-- schannel: stop calling it "winssl"
-
- Stick to "Schannel" everywhere. The configure option --with-winssl is
- kept to allow existing builds to work but --with-schannel is added as an
- alias.
-
- Closes #3504
-
-- multi: set the EXPIRE_*TIMEOUT timers at TIMER_STARTSINGLE time
-
- To make sure Curl_timeleft() also thinks the timeout has been reached
- when one of the EXPIRE_*TIMEOUTs expires.
-
- Bug: https://curl.haxx.se/mail/lib-2019-01/0073.html
- Reported-by: Zhao Yisha
- Closes #3501
-
-- [John Marshall brought this change]
-
- doc: use meaningless port number in CURLOPT_LOCALPORT example
-
- Use an ephemeral port number here; previously the example had 8080
- which could be confusing as the common web server port number might
- be misinterpreted as suggesting this option affects the remote port.
-
- URL: https://curl.haxx.se/mail/lib-2019-01/0084.html
- Closes #3513
-
-GitHub (29 Jan 2019)
-- [Gisle Vanem brought this change]
-
- Escape the '\'
-
- A backslash should be escaped in Roff / Troff.
-
-Jay Satiro (29 Jan 2019)
-- TODO: WinSSL: 'Add option to disable client cert auto-send'
-
- By default WinSSL selects and sends a client certificate automatically,
- but for privacy and consistency we should offer an option to disable the
- default auto-send behavior.
-
- Reported-by: Jeroen Ooms
-
- Closes https://github.com/curl/curl/issues/2262
-
-Daniel Stenberg (28 Jan 2019)
-- [Jeremie Rapin brought this change]
-
- sigpipe: if mbedTLS is used, ignore SIGPIPE
-
- mbedTLS doesn't have sigpipe management. If a write/read occurs when
- the remote closes the socket, the signal is raised and kills the
- application. Use the curl mechanisms to fix this behavior.
-
- Signed-off-by: Jeremie Rapin <j.rapin@overkiz.com>
-
- Closes #3502
-
-- unit1653: make it survive torture tests
-
-Jay Satiro (28 Jan 2019)
-- [Michael Kujawa brought this change]
-
- timeval: Disable MSVC Analyzer GetTickCount warning
-
- Compiling with msvc /analyze and a recent Windows SDK warns against
- using GetTickCount (it suggests using GetTickCount64 instead).
-
- Since GetTickCount is only being used when GetTickCount64 isn't
- available, I am disabling that warning.
-
- Fixes https://github.com/curl/curl/issues/3437
- Closes https://github.com/curl/curl/pull/3440
-
-Daniel Stenberg (26 Jan 2019)
-- configure: rewrite --enable-code-coverage
-
- The previously used ax_code_coverage.m4 is not license compatible and
- must not be used.
-
- Reported-by: William A. Rowe Jr
- Fixes #3497
- Closes #3499
-
-- [Felix Hädicke brought this change]
-
- setopt: enable CURLOPT_SSH_KNOWNHOSTS and CURLOPT_SSH_KEYFUNCTION for libssh
-
- CURLOPT_SSH_KNOWNHOSTS and CURLOPT_SSH_KEYFUNCTION are supported for
- libssh as well. So accepting these options only when compiling with
- libssh2 is wrong here.
-
- Fixes #3493
- Closes #3494
-
-- [Felix Hädicke brought this change]
-
- libssh: do not let libssh create socket
-
- By default, libssh creates a new socket, instead of using the socket
- created by curl for SSH connections.
-
- Pass the socket created by curl to libssh using ssh_options_set() with
- SSH_OPTIONS_FD directly after ssh_new(). So libssh uses our socket
- instead of creating a new one.
-
- This approach is very similar to what is done in the libssh2 code, where
- the socket created by curl is passed to libssh2 when
- libssh2_session_startup() is called.
-
- Fixes #3491
- Closes #3495
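-
- A minimal sketch (not taken from the curl sources; the function name and
- error handling are illustrative) of handing an already-created socket to
- libssh right after ssh_new(), before connecting, as described above:
-
-   #include <libssh/libssh.h>
-
-   /* attach a socket created elsewhere so libssh does not open its own;
-      must be done before ssh_connect() */
-   static int attach_socket(ssh_session session, socket_t sock)
-   {
-     if(ssh_options_set(session, SSH_OPTIONS_FD, &sock) != SSH_OK)
-       return -1;
-     return 0;
-   }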
-
-- RELEASE-NOTES: synced
-
-- [Archangel_SDY brought this change]
-
- schannel: preserve original certificate path parameter
-
- Fixes #3480
- Closes #3487
-
-- KNOWN_BUGS: tests not compatible with python3
-
- Closes #3289
- [skip ci]
-
-Daniel Gustafsson (20 Jan 2019)
-- memcmp: avoid doing single char memcmp
-
- There is no real gain in performing memcmp() comparisons on single
- characters, so change these to array subscript inspections which
- saves a call and makes the code clearer.
-
- Closes #3486
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
- Reviewed-by: Jay Satiro <raysatiro@yahoo.com>
-
-Daniel Stenberg (19 Jan 2019)
-- COPYING: it's 2019
-
- [skip ci]
-
-- [hhb brought this change]
-
- configure: fix recv/send/select detection on Android
-
- This reverts commit d4f25201fb7da03fc88f90d51101beb3d0026db9.
-
- The overloadable attribute is removed again starting from
- NDK17. It actually only exists in two NDK versions (15 and 16). With
- overloadable, the first condition tried will succeed, resulting in a
- wrong detection result.
-
- Closes #3484
-
-Marcel Raad (19 Jan 2019)
-- [Giorgos Oikonomou brought this change]
-
- ntlm_sspi: add support for channel binding
-
- Windows extended protection (aka SSL channel binding) is required
- to log in to an NTLM IIS endpoint, otherwise the server returns 401
- responses.
-
- Fixes #3280
- Closes #3321
-
-Daniel Stenberg (18 Jan 2019)
-- schannel: on connection close there might not be a transfer
-
- Reported-by: Marcel Raad
- Fixes #3412
- Closes #3483
-
-- [Joel Depooter brought this change]
-
- ssh: log the libssh2 error message when ssh session startup fails
-
- When an SSH session startup fails, it is useful to know why it has
- failed. This commit changes the message from:
- "Failure establishing ssh session"
- to something like this, for example:
- "Failure establishing ssh session: -5, Unable to exchange encryption keys"
-
- Closes #3481
-
-Alessandro Ghedini (16 Jan 2019)
-- Fix typo in manpage
-
-Daniel Stenberg (16 Jan 2019)
-- RELEASE-NOTES: synced
-
-Sergei Nikulov (16 Jan 2019)
-- cmake: updated check for HAVE_POLL_FINE to match autotools
-
-Daniel Stenberg (16 Jan 2019)
-- curl-compilers.m4: check for __ibmxl__ to detect xlclang
-
- Follow-up to 2fa0d57e2e3. The __xlc__ symbol is only defined there if a
- particular flag is used for legacy macros.
-
- Fixes #3474
- Closes #3479
-
-- openssl: fix the SSL_get_tlsext_status_ocsp_resp call
-
- .... to not pass in a const in the second argument as that's not how it
- is supposed to be used and might cause compiler warnings.
-
- Reported-by: Pavel Pavlov
- Fixes #3477
- Closes #3478
-
-- curl-compilers.m4: detect xlclang
-
- Since it isn't totally clang compatible, we detect this IBM clang
- front-end and, if detected, avoid some clang-specific magic.
-
- Reported-by: Kees Dekker
- Fixes #3474
- Closes #3476
-
-- README: add codacy code quality badge
-
- [skip ci]
-
-- extract_if_dead: follow-up to 54b201b48c90a
-
- extract_if_dead() is called from two functions, and only one of
- them should get conn->data updated; now neither call path clears it.
-
- scan-build found a case where conn->data would be NULL dereferenced in
- ConnectionExists() otherwise.
-
- Closes #3473
-
-- multi: remove "Dead assignment"
-
- Found by scan-build. Follow-up to 4c35574bb785ce.
-
- Closes #3471
-
-- tests: move objnames-* from lib into tests
-
- Since they're used purely for testing purposes, I think they should
- rather be stored there.
-
- Closes #3470
-
-Sergei Nikulov (15 Jan 2019)
-- travis: added cmake build for osx
-
-Daniel Stenberg (14 Jan 2019)
-- [Frank Gevaerts brought this change]
-
- cookie: fix comment typo (url_path_len -> uri_path_len)
-
- Closes #3469
-
-Marcel Raad (14 Jan 2019)
-- winbuild: conditionally use /DZLIB_WINAPI
-
- zlibwapi.lib (dynamic library) and zlibstat.lib (static library) have
- the ZLIB_WINAPI define set by default. Using them requires that define
- too.
-
- Ref: https://zlib.net/DLL_FAQ.txt
-
- Fixes https://github.com/curl/curl/issues/3133
- Closes https://github.com/curl/curl/pull/3460
-
-Daniel Stenberg (14 Jan 2019)
-- src/Makefile: make 'tidy' target work for metalink builds
-
-- extract_if_dead: use a known working transfer when checking connections
-
- Make sure that this function sets a proper "live" transfer for the
- connection before calling the protocol-specific connection check
- function, and then clear it again afterward as a non-used connection has
- no current transfer.
-
- Reported-by: Jeroen Ooms
- Reviewed-by: Marcel Raad
- Reviewed-by: Daniel Gustafsson
- Fixes #3463
- Closes #3464
-
-- openssl: adapt to 3.0.0, OpenSSL_version_num() is deprecated
-
- OpenSSL_version() replaces OpenSSL_version_num()
-
- Closes #3462
-
-Sergei Nikulov (11 Jan 2019)
-- cmake: added checks for HAVE_VARIADIC_MACROS_C99 and HAVE_VARIADIC_MACROS_GCC
-
-Daniel Stenberg (11 Jan 2019)
-- urldata: rename easy_conn to just conn
-
- We use "conn" everywhere to be a pointer to the connection.
-
- Introduces two functions that "attaches" and "detaches" the connection
- to and from the transfer.
-
- Going forward, we should favour using "data->conn" (since a transfer
- always only has a single connection or none at all) over "conn->data"
- (since a connection can have none, one or many transfers associated with
- it and updating conn->data to be correct is error prone and a frequent
- reason for internal issues).
-
- Closes #3442
-
-- tool_cb_prg: avoid integer overflow
-
- When calculating the progress bar width.
-
- Reported-by: Peng Li
- Fixes #3456
- Closes #3458
-
-Daniel Gustafsson (11 Jan 2019)
-- travis: turn off copyright year checks in checksrc
-
- Invoking the maintainer-intended COPYRIGHTYEAR check for everyone
- in the PR pipeline is too invasive, especially at the turn of the
- year when many files get affected. Remove it and leave it as a tool
- for maintainers to verify patches before commits.
-
- This reverts f7bdf4b2e1d81b2652b81b9b3029927589273b41.
-
- After discussion with: Daniel Stenberg
-
-Daniel Stenberg (10 Jan 2019)
-- KNOWN_BUGS: cmake makes unusable tool_hugehelp.c with MinGW
-
- Closes #3125
-
-- KNOWN_BUGS: Improve --data-urlencode space encoding
-
- Closes #3229
-
-Patrick Monnerat (10 Jan 2019)
-- os400: add a missing closing bracket
-
- See https://github.com/curl/curl/issues/3453#issuecomment-453054458
-
- Reported-by: jonrumsey on github
-
-- os400: fix extra parameter syntax error.
-
- Reported-by: jonrumsey on github
- Closes #3453
-
-Daniel Stenberg (10 Jan 2019)
-- test1558: verify CURLINFO_PROTOCOL on file:// transfer
-
- Attempt to reproduce issue #3444.
-
- Closes #3447
-
-- RELEASE-NOTES: synced
-
-- xattr: strip credentials from any URL that is stored
-
- Both user and password are cleared unconditionally.
-
- Added unit test 1621 to verify.
-
- Fixes #3423
- Closes #3433
-
-- cookies: allow secure override when done over HTTPS
-
- Added test 1562 to verify.
-
- Reported-by: Jeroen Ooms
- Fixes #3445
- Closes #3450
-
-- multi: multiplexing improvements
-
- Fixes #3436
- Closes #3448
-
- Problem 1
-
- After LOTS of scratching my head, I eventually realized that even when doing
- 10 uploads in parallel, sometimes the socket callback to the application that
- tells it what to wait for on the socket looked like it would reflect the
- status of just the single transfer that had just changed state.
-
- Digging into the code revealed that this was indeed the truth. When multiple
- transfers are using the same connection, the application did not correctly get
- the *combined* flags for all transfers which then could make it switch to READ
- (only) when in fact most transfers wanted to get told when the socket was
- WRITEABLE.
-
- Problem 1b
-
- A separate but related regression had also been introduced by me when I
- cleared connection/transfer association better a while ago, as now the logic
- couldn't find the connection and see if that was marked as used by more
- transfers and then it would also prematurely remove the socket from the socket
- hash table even when other transfers were still using it!
-
- Fix 1
-
- Make sure that each socket stored in the socket hash has a "combined" action
- field of what to ask the application to wait for, that is potentially the ORed
- action of multiple parallel transfers. And remove that socket hash entry only
- if there are no transfers left using it.
-
- Problem 2
-
- The socket hash entry stored an association to a single transfer using that
- socket - and when curl_multi_socket_action() was called to tell libcurl about
- activities on that specific socket only that transfer was "handled".
-
- This was WRONG, as a single socket/connection can be used by numerous parallel
- transfers and not necessarily a single one.
-
- Fix 2
-
- We now store a list of handles in the socket hashtable entry and when libcurl
- is told there's traffic for a particular socket, it now iterates over all
- known transfers using that single socket.
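-
- An illustrative sketch of the idea in Fix 1, using entirely hypothetical
- types (this is not the actual multi.c code): the action asked of the
- socket callback is the OR of what every transfer on that socket wants:
-
-   #include <curl/curl.h>
-
-   struct xfer {
-     int action;   /* CURL_POLL_IN and/or CURL_POLL_OUT for this transfer */
-   };
-
-   /* combine what all transfers sharing one socket want to wait for */
-   static int combined_action(const struct xfer *xfers, int count)
-   {
-     int action = 0;
-     int i;
-     for(i = 0; i < count; i++)
-       action |= xfers[i].action;
-     return action;  /* only drop the hash entry when count reaches zero */
-   }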
-
-- test1561: improve test name
-
- [skip ci]
-
-- [Katsuhiko YOSHIDA brought this change]
-
- cookies: skip custom cookies when redirecting cross-site
-
- Closes #3417
-
-- THANKS: fixups and a dedupe
-
- [skip ci]
-
-- timediff: fix math for unsigned time_t
-
- Bug: https://curl.haxx.se/mail/lib-2018-12/0088.html
-
- Closes #3449
-
-- [Bernhard M. Wiedemann brought this change]
-
- tests: allow tests to pass by 2037-02-12
-
- similar to commit f508d29f3902104018
-
- Closes #3443
-
-- RELEASE-NOTES: synced
-
-- [Brad Spencer brought this change]
-
- curl_multi_remove_handle() don't block terminating c-ares requests
-
- Added Curl_resolver_kill() for all three resolver modes, which only
- blocks when necessary, along with test 1592 to confirm
- curl_multi_remove_handle() doesn't block unless it must.
-
- Closes #3428
- Fixes #3371
-
-- Revert "http_negotiate: do not close connection until negotiation is completed"
-
- This reverts commit 07ebaf837843124ee670e5b8c218b80b92e06e47.
-
- This also reopens PR #3275 which brought the change now reverted.
-
- Fixes #3384
- Closes #3439
-
-- curl/urlapi.h: include "curl.h" first
-
- This allows programs to include curl/urlapi.h directly.
-
- Reviewed-by: Daniel Gustafsson
- Reported-by: Ben Kohler
- Fixes #3438
- Closes #3441
-
-Marcel Raad (6 Jan 2019)
-- VS projects: fix build warning
-
- Starting with Visual Studio 2017 Update 9, Visual Studio doesn't like
- the MinimalRebuild option anymore and warns:
-
- cl : Command line warning D9035: option 'Gm' has been deprecated and
- will be removed in a future release
-
- The option can be safely removed so that the default is used.
-
- Closes https://github.com/curl/curl/pull/3425
-
-- schannel: fix compiler warning
-
- When building with Unicode on MSVC, the compiler warns about freeing a
- pointer to const in Curl_unicodefree. Fix this by declaring it as
- non-const and casting the argument to Curl_convert_UTF8_to_tchar to
- non-const too, like we do in all other places.
-
- Closes https://github.com/curl/curl/pull/3435
-
-Daniel Stenberg (4 Jan 2019)
-- [Rikard Falkeborn brought this change]
-
- printf: introduce CURL_FORMAT_TIMEDIFF_T
-
-- [Rikard Falkeborn brought this change]
-
- printf: fix format specifiers
-
- Closes #3426
-
-- libtest/stub_gssapi: use "real" snprintf
-
- ... since it doesn't link with libcurl.
-
- Reverts the commit dcd6f81025 changes from this file.
-
- Bug: https://curl.haxx.se/mail/lib-2019-01/0000.html
- Reported-by: Shlomi Fish
- Reviewed-by: Daniel Gustafsson
- Reviewed-by: Kamil Dudka
-
- Closes #3434
-
-- INTERNALS: correct some outdated function names
-
- Closes #3431
-
-- docs/version.d: mention MultiSSL
-
- Reviewed-by: Daniel Gustafsson
- Closes #3432
-
-Daniel Gustafsson (2 Jan 2019)
-- [Rikard Falkeborn brought this change]
-
- examples: Update .gitignore
-
- Add a few missing examples to make `make examples` not leave the
- workspace in a dirty state.
-
- Closes #3427
- Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
-
-- THANKS: add more missing names
-
- Add Adrian Burcea who made the artwork for the curl://up 2018 event
- which was held in Stockholm, Sweden.
-
-- docs: mention potential leak in curl_slist_append
-
- When a non-empty list is appended to, and used as the return value,
- the list pointer can leak in case of an allocation failure in the
- curl_slist_append() call. This is correctly handled in curl code
- usage but we weren't explicitly pointing it out in the API call
- documentation. Fix by extending the RETURNVALUE manpage section
- and example code.
-
- Closes #3424
- Reported-by: dnivras on github
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
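-
- A minimal sketch of the safe pattern described above (not the manpage's
- exact example; the header string is just a placeholder):
-
-   #include <curl/curl.h>
-
-   /* append to a list without losing it if the allocation fails */
-   static struct curl_slist *append_or_free(struct curl_slist *list,
-                                            const char *header)
-   {
-     struct curl_slist *tmp = curl_slist_append(list, header);
-     if(!tmp) {
-       curl_slist_free_all(list);  /* free the old list, nothing leaks */
-       return NULL;
-     }
-     return tmp;                   /* only use the new pointer on success */
-   }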
-
-Marcel Raad (1 Jan 2019)
-- tvnow: silence conversion warnings
-
- MinGW-w64 defaults to targeting Windows 7 now, so GetTickCount64 is
- used and the milliseconds are represented as unsigned long long,
- leading to a compiler warning when implicitly converting them to long.
-
-Daniel Stenberg (1 Jan 2019)
-- THANKS: dedupe more names
-
- Researched-by: Tae Wong
-
-Marcel Raad (1 Jan 2019)
-- [Markus Moeller brought this change]
-
- ntlm: update selection of type 3 response
-
- NTLM2 did not work i.e. no NTLMv2 response was created. Changing the
- check seems to work.
-
- Ref: https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-NLMP/[MS-NLMP].pdf
-
- Fixes https://github.com/curl/curl/issues/3286
- Closes https://github.com/curl/curl/pull/3287
- Closes https://github.com/curl/curl/pull/3415
-
-Daniel Stenberg (31 Dec 2018)
-- THANKS: added missing names from year <= 2000
-
- Due to a report of a missing name in THANKS I manually went through an
- old CHANGES.0 file and added many previously missing names here.
-
-Daniel Gustafsson (30 Dec 2018)
-- urlapi: fix parsing ipv6 with zone index
-
- The previous fix for parsing IPv6 URLs with a zone index was a paddle
- short for URLs without an explicit port. This patch fixes that case
- and adds a unit test case.
-
- This bug was highlighted by issue #3408, and while it's not the full
- fix for the problem there it is an isolated bug that should be fixed
- regardless.
-
- Closes #3411
- Reported-by: GitYuanQu on github
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (30 Dec 2018)
-- THANKS: dedupe Guenter Knauf
-
- Reported-by: Tae Wong
-
-- THANKS: missing name from the 6.3.1 release!
-
-Daniel Gustafsson (27 Dec 2018)
-- RELEASE-NOTES: synced
-
-- [Claes Jakobsson brought this change]
-
- hostip: support wildcard hosts
-
- This adds support for wildcard hosts in CURLOPT_RESOLVE. These are
- tried last so any non-wildcard entry is resolved first. If specified,
- any host not matched by another CURLOPT_RESOLVE config will use this
- as a fallback.
-
- Example: send a.com to 10.0.0.1 and everything else to 10.0.0.2:
- curl --resolve *:443:10.0.0.2 --resolve a.com:443:10.0.0.1 \
- https://a.com https://b.com
-
- This is probably quite similar to using:
- --connect-to a.com:443:10.0.0.1:443 --connect-to :443:10.0.0.2:443
-
- Closes #3406
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
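-
- A rough libcurl sketch equivalent to the command line above (addresses
- and URL taken from the example; error checking omitted):
-
-   #include <curl/curl.h>
-
-   static void resolve_example(void)
-   {
-     struct curl_slist *resolve = NULL;
-     CURL *curl;
-
-     resolve = curl_slist_append(resolve, "a.com:443:10.0.0.1");
-     resolve = curl_slist_append(resolve, "*:443:10.0.0.2"); /* wildcard fallback */
-
-     curl = curl_easy_init();
-     curl_easy_setopt(curl, CURLOPT_RESOLVE, resolve);
-     curl_easy_setopt(curl, CURLOPT_URL, "https://a.com/");
-     curl_easy_perform(curl);
-
-     curl_easy_cleanup(curl);
-     curl_slist_free_all(resolve);
-   }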
-
-- url: fix incorrect indentation
-
-Patrick Monnerat (26 Dec 2018)
-- os400: upgrade ILE/RPG binding.
-
- - Trailer function support.
- - http 0.9 option.
- - curl_easy_upkeep.
-
-Daniel Gustafsson (25 Dec 2018)
-- FAQ: remove mention of sourceforge for github
-
- The project bug tracker is no longer hosted at sourceforge but is now
- hosted on the curl Github page. Update the FAQ to reflect.
-
- Closes #3410
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-- openvms: fix typos in documentation
-
-- openvms: fix OpenSSL discovery on VAX
-
- The DCL code had a typo in one of the commands which would make the
- OpenSSL discovery on VAX fail. The correct syntax is F$ENVIRONMENT.
-
- Closes #3407
- Reviewed-by: Viktor Szakats <commit@vszakats.net>
-
-Daniel Stenberg (24 Dec 2018)
-- [Ruslan Baratov brought this change]
-
- cmake: use lowercase for function name like the rest of the code
-
- Reviewed-by: Sergei Nikulov
-
- closes #3196
-
-- Revert "libssh: no data pointer == nothing to do"
-
- This reverts commit c98ee5f67f497195c9 since commit f3ce38739fa fixed the
- problem in a more generic way.
-
-- disconnect: set conn->data for protocol disconnect
-
- Follow-up to fb445a1e18d: Set conn->data explicitly to point out the
- current transfer when invoking the protocol-specific disconnect function
- so that it can work correctly.
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=12173
-
-Jay Satiro (23 Dec 2018)
-- [Pavel Pavlov brought this change]
-
- timeval: Use high resolution timestamps on Windows
-
- - Use QueryPerformanceCounter on Windows Vista+
-
- There is confusing info floating around that QueryPerformanceCounter
- can leap etc, which might have been true a long time ago, but is no
- longer the case nowadays (perhaps starting from WinXP?). Also, boost and
- std::chrono::steady_clock use QueryPerformanceCounter in a similar way.
-
- Prior to this change GetTickCount or GetTickCount64 was used, which has
- lower resolution. That is still the case for <= XP.
-
- Fixes https://github.com/curl/curl/issues/3309
- Closes https://github.com/curl/curl/pull/3318
-
-Daniel Stenberg (22 Dec 2018)
-- libssh: no data pointer == nothing to do
-
-- conncache_unlock: avoid indirection by changing input argument type
-
-- disconnect: separate connections and easy handles better
-
- Do not assume/store an association between a given easy handle and the
- connection if it can be avoided.
-
- Long-term, the 'conn->data' pointer should probably be removed as it is a
- little too error-prone. Still used very widely though.
-
- Reported-by: masbug on github
- Fixes #3391
- Closes #3400
-
-- libssh: free sftp_canonicalize_path() data correctly
-
- Assisted-by: Harry Sintonen
-
- Fixes #3402
- Closes #3403
-
-- RELEASE-NOTES: synced
-
-- http: added options for allowing HTTP/0.9 responses
-
- Added CURLOPT_HTTP09_ALLOWED and --http0.9 for this purpose.
-
- For now, both the tool and library allow HTTP/0.9 by default.
- docs/DEPRECATE.md lays out the plan for when to reverse that default: 6
- months after the 7.64.0 release. The options are added already now so
- that applications/scripts can start using them right away.
-
- Fixes #2873
- Closes #3383
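-
- A minimal sketch of opting in from an application (the URL is a
- placeholder); the tool equivalent is passing --http0.9:
-
-   #include <curl/curl.h>
-
-   static void allow_http09(void)
-   {
-     CURL *curl = curl_easy_init();
-     curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
-     curl_easy_setopt(curl, CURLOPT_HTTP09_ALLOWED, 1L); /* the option takes a long */
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }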
-
-- if2ip: remove unused function Curl_if_is_interface_name
-
- Closes #3401
-
-- http2: clear pause stream id if it gets closed
-
- Reported-by: Florian Pritz
-
- Fixes #3392
- Closes #3399
-
-Daniel Gustafsson (20 Dec 2018)
-- [David Garske brought this change]
-
- wolfssl: Perform cleanup
-
- This adds a cleanup callback for cyassl. Resolves possible memory leak
- when using ECC fixed point cache.
-
- Closes #3395
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
- Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
-
-Daniel Stenberg (20 Dec 2018)
-- mbedtls: follow-up VERIFYHOST fix from f097669248
-
- Fix-by: Eric Rosenquist
-
- Fixes #3376
- Closes #3390
-
-- curlver: bump to 7.64.0 for next release
-
-Daniel Gustafsson (19 Dec 2018)
-- cookies: extend domain checks to non psl builds
-
- Ensure that we perform the checks we have to enforce a sane domain in
- the cookie request. The check for non-PSL enabled builds is quite
- basic but it's better than nothing.
-
- Closes #2964
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (19 Dec 2018)
-- [Matus Uzak brought this change]
-
- smb: fix incorrect path in request if connection reused
-
- Follow-up to 09e401e01bf9. If the connection gets reused, the data
- member will be copied, but not the proto member. As a result, in
- smb_do(), the path has been set from the original proto.share data.
-
- Closes #3388
-
-- curl -J: do not append to the destination file
-
- Reported-by: Kamil Dudka
- Fixes #3380
- Closes #3381
-
-- mbedtls: use VERIFYHOST
-
- Previously, VERIFYPEER would enable/disable all checks.
-
- Reported-by: Eric Rosenquist
- Fixes #3376
- Closes #3380
-
-- pingpong: change default response timeout to 120 seconds
-
- Previously it was 30 minutes
-
-- pingpong: ignore regular timeout in disconnect phase
-
- The timeout set with CURLOPT_TIMEOUT is no longer used when
- disconnecting from one of the pingpong protocols (FTP, IMAP, SMTP,
- POP3).
-
- Reported-by: jasal82 on github
-
- Fixes #3264
- Closes #3374
-
-- TODO: Windows: set attribute 'archive' for completed downloads
-
- Closes #3354
-
-- RELEASE-NOTES: synced
-
-- http: minor whitespace cleanup from f464535b
-
-- [Ayoub Boudhar brought this change]
-
- http: Implement trailing headers for chunked transfers
-
- This adds the CURLOPT_TRAILERDATA and CURLOPT_TRAILERFUNCTION
- options that allow a callback based approach to sending trailing headers
- with chunked transfers.
-
- The test server (sws) was updated to take into account the detection of the
- end of transfer in the case of trailing headers presence.
-
- Test 1591 checks that trailing headers can be sent using libcurl.
-
- Closes #3350
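-
- A minimal sketch of the callback approach (the trailer header name is
- made up; error handling omitted):
-
-   #include <curl/curl.h>
-
-   /* called by libcurl when the final chunk is about to be sent */
-   static int trailer_cb(struct curl_slist **list, void *userdata)
-   {
-     (void)userdata;
-     *list = curl_slist_append(*list, "My-Trailer: done");
-     return CURL_TRAILERFUNC_OK;   /* or CURL_TRAILERFUNC_ABORT to stop */
-   }
-
-   static void setup_trailers(CURL *curl)
-   {
-     curl_easy_setopt(curl, CURLOPT_TRAILERFUNCTION, trailer_cb);
-     curl_easy_setopt(curl, CURLOPT_TRAILERDATA, NULL);
-   }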
-
-- darwinssl: accept setting max-tls with default min-tls
-
- Reported-by: Andrei Neculau
- Fixes #3367
- Closes #3373
-
-- gopher: fix memory leak from 9026083ddb2a9
-
-- [Leonardo Taccari brought this change]
-
- test1201: Add a trailing `?' to the selector
-
- This verify that the `?' in the selector is kept as is.
-
- Verifies the fix in #3370
-
-- [Leonardo Taccari brought this change]
-
- gopher: always include the entire gopher-path in request
-
- After the migration to URL API all octets in the selector after the
- first `?' were interpreted as query and accidentally discarded and not
- passed to the server.
-
- Add a gopherpath to always concatenate possible path and query URL
- pieces.
-
- Fixes #3369
- Closes #3370
-
-- [Leonardo Taccari brought this change]
-
- urlapi: distinguish possibly empty query
-
- If just a `?' to indicate the query is passed, always store a zero-length
- query instead of having a NULL query.
-
- This permits distinguishing a URL with a trailing `?'.
-
- Fixes #3369
- Closes #3370
-
-Daniel Gustafsson (13 Dec 2018)
-- OS400: handle memory error in list conversion
-
- Curl_slist_append_nodup() returns NULL when it fails to create a new
- item for the specified list, and since the coding here reassigned the
- new list on top of the old list it would result in a dangling pointer
- and lost memory. Also, in case we hit an allocation failure at some
- point during the conversion, with allocation succeeding again on the
- subsequent call(s) we will return a truncated list around the malloc
- failure point. Fix by assigning to a temporary list pointer, which can
- be checked (which is the common pattern for slist appending), and free
- all the resources on allocation failure.
-
- Closes #3372
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-- cookies: leave secure cookies alone
-
- Only allow secure origins to be able to write cookies with the
- 'secure' flag set. This reduces the risk of non-secure origins
- to influence the state of secure origins. This implements IETF
- Internet-Draft draft-ietf-httpbis-cookie-alone-01 which updates
- RFC6265.
-
- Closes #2956
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (13 Dec 2018)
-- docs: fix the --tls-max description
-
- Reported-by: Tobias Lindgren
- Pointed out in #3367
-
- Closes #3368
-
-Daniel Gustafsson (12 Dec 2018)
-- urlapi: Fix port parsing of eol colon
-
- A URL with a single colon without a portnumber should use the default
- port, discarding the colon. Fix this, add a testcase and also do a
- little bit of comment wordsmithing.
-
- Closes #3365
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Version 7.63.0 (12 Dec 2018)
-
-Daniel Stenberg (12 Dec 2018)
-- RELEASE-NOTES: 7.63.0
-
-- THANKS: from the curl 7.62.0 cycle
-
-- test1519: use lib1518 and test CURLINFO_REDIRECT_URL more
-
-- Curl_follow: extract the Location: header field unvalidated
-
- ... when not actually following the redirect. Otherwise we return error
- for this and an application can't extract the value.
-
- Test 1518 added to verify.
-
- Reported-by: Pavel Pavlov
- Fixes #3340
- Closes #3364
-
-- multi: convert two timeout variables to timediff_t
-
- The time_t type is unsigned on some systems and these variables are used
- to hold return values from functions that return timediff_t
- already. timediff_t is always a signed type.
-
- Closes #3363
-
-- delta: use --diff-filter on the git diff-tree invokes
-
- Suggested-by: Dave Reisner
-
-Patrick Monnerat (11 Dec 2018)
-- documentation: curl_formadd field and file names are now escaped
-
- Prior to 7.56.0, fieldnames and filenames were set in Content-Disposition
- header without special processing: this may lead to invalid RFC 822
- quoted-strings.
- 7.56.0 introduces escaping of backslashes and double quotes in these names:
- mention it in the documentation.
-
- Reported-by: daboul on github
- Closes #3361
-
-Daniel Stenberg (11 Dec 2018)
-- scripts/delta: show repo delta info from last release
-
- ... where "last release" should be the git tag in the repo.
-
-Daniel Gustafsson (11 Dec 2018)
-- tests: add urlapi unittest
-
- This adds a new unittest intended to cover the internal functions in
- the urlapi code, starting with parse_port(). In order to avoid name
- collisions in debug builds, parse_port() is renamed Curl_parse_port()
- since it will be exported.
-
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
- Reviewed-by: Marcel Raad <Marcel.Raad@teamviewer.com>
-
-- urlapi: fix portnumber parsing for ipv6 zone index
-
- An IPv6 URL which contains a zone index includes a '%25<zone id>'
- string before the ending ']' bracket. The parsing logic wasn't set
- up to cope with the zone index however, resulting in a malformed url
- error being returned. Fix by breaking the parsing into two stages
- to correctly handle the zone index.
-
- Closes #3355
- Closes #3319
- Reported-by: tonystz on Github
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
- Reviewed-by: Marcel Raad <Marcel.Raad@teamviewer.com>
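-
- A small sketch of parsing such a URL with the URL API (the address and
- zone name are placeholders):
-
-   #include <stdio.h>
-   #include <curl/curl.h>
-
-   static void parse_zone_url(void)
-   {
-     CURLU *u = curl_url();
-     char *host = NULL;
-     /* %25 is the percent-encoded '%' that precedes the zone id */
-     if(!curl_url_set(u, CURLUPART_URL, "http://[fe80::1%25eth0]/index.html", 0) &&
-        !curl_url_get(u, CURLUPART_HOST, &host, 0))
-       printf("host: %s\n", host);
-     curl_free(host);
-     curl_url_cleanup(u);
-   }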
-
-Daniel Stenberg (11 Dec 2018)
-- [Jay Satiro brought this change]
-
- http: fix HTTP auth to include query in URI
-
- - Include query in the path passed to generate HTTP auth.
-
- Recent changes to use the URL API internally (46e1640, 7.62.0)
- inadvertently broke authentication URIs by omitting the query.
-
- Fixes https://github.com/curl/curl/issues/3353
- Closes #3356
-
-- [Michael Kaufmann brought this change]
-
- http: don't set CURLINFO_CONDITION_UNMET for http status code 204
-
- The http status code 204 (No Content) should not change the "condition
- unmet" flag. Only the http status code 304 (Not Modified) should do
- this.
-
- Closes #359
-
-- [Samuel Surtees brought this change]
-
- ldap: fix LDAP URL parsing regressions
-
- - Match URL scheme with LDAP and LDAPS
- - Retrieve attributes, scope and filter from URL query instead
-
- Regression brought in 46e164069d1a5230 (7.62.0)
-
- Closes #3362
-
-- RELEASE-NOTES: synced
-
-- [Stefan Kanthak brought this change]
-
- (lib)curl.rc: fixup for minor bugs
-
- All resources defined in lib/libcurl.rc and curl.rc are language
- neutral.
-
- winbuild/MakefileBuild.vc ALWAYS defines the macro DEBUGBUILD, so the
- ifdef's in line 33 of lib/libcurl.rc and src/curl.rc are wrong.
-
- Replace the hard-coded constants in both *.rc files with #define'd
- values.
-
- Thumbs-uped-by: Rod Widdowson, Johannes Schindelin
- URL: https://curl.haxx.se/mail/lib-2018-11/0000.html
- Closes #3348
-
-- test329: verify cookie max-age=0 immediate expiry
-
-- cookies: expire "Max-Age=0" immediately
-
- Reported-by: Jeroen Ooms
- Fixes #3351
- Closes #3352
-
-- [Johannes Schindelin brought this change]
-
- Upon HTTP_1_1_REQUIRED, retry the request with HTTP/1.1
-
- This is a companion patch to cbea2fd2c (NTLM: force the connection to
- HTTP/1.1, 2018-12-06): with NTLM, we can switch to HTTP/1.1
- preemptively. However, with other (Negotiate) authentication it is not
- clear to this developer whether there is a way to make it work with
- HTTP/2, so let's try HTTP/2 first and fall back in case we encounter the
- error HTTP_1_1_REQUIRED.
-
- Note: we will still keep the NTLM workaround, as it avoids an extra
- round trip.
-
- Daniel Stenberg helped a lot with this patch, in particular by
- suggesting to introduce the Curl_h2_http_1_1_error() function.
-
- Closes #3349
-
- Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
-
-- [Ben Greear brought this change]
-
- openssl: fix unused variable compiler warning with old openssl
-
- URL: https://curl.haxx.se/mail/lib-2018-11/0055.html
-
- Closes #3347
-
-- [Johannes Schindelin brought this change]
-
- NTLM: force the connection to HTTP/1.1
-
- Since v7.62.0, cURL tries to use HTTP/2 whenever the server announces
- the capability. However, NTLM authentication only works with HTTP/1.1,
- and will likely remain in that boat (for details, see
- https://docs.microsoft.com/en-us/iis/get-started/whats-new-in-iis-10/http2-on-iis#when-is-http2-not-supported).
-
- When we just found out that we want to use NTLM, and when the current
- connection runs in HTTP/2 mode, let's force the connection to be closed
- and to be re-opened using HTTP/1.1.
-
- Fixes https://github.com/curl/curl/issues/3341.
- Closes #3345
-
- Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
-
-- [Johannes Schindelin brought this change]
-
- curl_global_sslset(): id == -1 is not necessarily an error
-
- It is allowed to call that function with id set to -1, specifying the
- backend by the name instead. We should imitate what is done further down
- in that function to allow for that.
-
- Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
-
- Closes #3346
-
-Johannes Schindelin (6 Dec 2018)
-- .gitattributes: make tabs in indentation a visible error
-
- Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
-
-Daniel Stenberg (6 Dec 2018)
-- RELEASE-NOTES: synced
-
-- doh: fix memory leak in OOM situation
-
- Reviewed-by: Daniel Gustafsson
- Closes #3342
-
-- doh: make it work for h2-disabled builds too
-
- Reported-by: dtmsecurity at github
- Fixes #3325
- Closes #3336
-
-- packages: remove old leftover files and dirs
-
- This subdir has mostly become an attic of never-used cruft from the
- past.
-
- Closes #3331
-
-- [Gergely Nagy brought this change]
-
- openssl: do not use file BIOs if not requested
-
- Moves the file handling BIO calls to the branch of the code where they
- are actually used.
-
- Closes #3339
-
-- [Paul Howarth brought this change]
-
- nss: Fix compatibility with nss versions 3.14 to 3.15
-
-- [Paul Howarth brought this change]
-
- nss: Improve info message when falling back SSL protocol
-
- Use descriptive text strings rather than decimal numbers.
-
-- [Paul Howarth brought this change]
-
- nss: Fall back to latest supported SSL version
-
- NSS may be built without support for the latest SSL/TLS versions,
- leading to "SSL version range is not valid" errors when the library
- code supports a recent version (e.g. TLS v1.3) but it has explicitly
- been disabled.
-
- This change adjusts the maximum SSL version requested by libcurl to
- be the maximum supported version at runtime, as long as that version
- is at least as high as the minimum version required by libcurl.
-
- Fixes #3261
-
-Daniel Gustafsson (3 Dec 2018)
-- travis: enable COPYRIGHTYEAR extended warning
-
- The extended warning for checking incorrect COPYRIGHTYEAR is quite
- expensive to run, so rather than expecting every developer to do it
- we ensure it's turned on locally for Travis.
-
-- checksrc: add COPYRIGHTYEAR check
-
- Forgetting to bump the year in the copyright clause when hacking has
- been quite common among curl developers, but a traditional checksrc
- check isn't a good fit as it would penalize anyone hacking on January
- 1st (among other things). This adds a more selective COPYRIGHTYEAR
- check which intends to only cover the currently hacked on changeset.
-
- The check for updated copyright year is currently not enforced on all
- files but only on files edited and/or committed locally. This is due to
- the number of files which aren't updated with their correct copyright
- year at the time of their respective commit.
-
- To further avoid running this expensive check for every developer, it
- adds a new local override mode for checksrc where a .checksrc file can
- be used to turn on extended warnings locally.
-
- Closes #3303
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (3 Dec 2018)
-- CHECKSRC.md: document more warnings
-
- Closes #3335
- [ci skip]
-
-- RELEASE-NOTES: synced
-
-- SECURITY-PROCESS: bountygraph shuts down
-
- This backpedals back the documents to the state before bountygraph.
-
- Closes #3311
-
-- curl: fix memory leak reading --writeout from file
-
- If another string had been set first, the writeout function for reading
- the syntax from a file would leak the previously allocated memory.
-
- Reported-by: Brian Carpenter
- Fixes #3322
- Closes #3330
-
-- tool_main: rename function to make it unique and better
-
- ... there's already another function in the curl tool named
- free_config_fields!
-
-Daniel Gustafsson (29 Nov 2018)
-- TODO: remove CURLOPT_DNS_USE_GLOBAL_CACHE entry
-
- Commit 7c5837e79280e6abb3ae143dfc49bca5e74cdd11 deprecated the option
- making it a manual code-edit operation to turn it back on. The removal
- process has thus started and is now documented in docs/DEPRECATE.md so
- remove from the TODO to avoid anyone looking for something to pick up
- spend cycles on an already in-progress entry.
-
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Jay Satiro (29 Nov 2018)
-- [Sevan Janiyan brought this change]
-
- connect: fix building for recent versions of Minix
-
- EBADIOCTL doesn't exist on more recent Minix.
- There have also been substantial changes to the network stack.
- Fixes build on Minix 3.4rc
-
- Closes https://github.com/curl/curl/pull/3323
-
-- [Konstantin Kushnir brought this change]
-
- CMake: fix MIT/Heimdal Kerberos detection
-
- - fix syntax error in FindGSS.cmake
- - correct krb5 include directory. FindGSS exports
- "GSS_INCLUDE_DIR" variable.
-
- Closes https://github.com/curl/curl/pull/3316
-
-Daniel Stenberg (28 Nov 2018)
-- test328: verify Content-Encoding: none
-
- Because of issue #3315
-
- Closes #3317
-
-- [James Knight brought this change]
-
- configure: include all libraries in ssl-libs fetch
-
- When compiling a collection of SSL libraries to link against (SSL_LIBS),
- ensure all libraries are included. The call `--libs-only-l` can produce
- only a subset of what is found in a `--libs` call (e.g. pthread may be
- excluded).
- Adding `--libs-only-other` ensures other libraries are also included in
- the list. This corrects select build environments compiling against a
- static version of OpenSSL. Before the change, the following could be
- observed:
-
- checking for openssl options with pkg-config... found
- configure: pkg-config: SSL_LIBS: "-lssl -lz -ldl -lcrypto -lz -ldl "
- configure: pkg-config: SSL_LDFLAGS: "-L/home/jdknight/<workdir>/staging/usr/lib -L/home/jdknight/<workdir>/staging/usr/lib "
- configure: pkg-config: SSL_CPPFLAGS: "-I/home/jdknight/<workdir>/staging/usr/include "
- checking for HMAC_Update in -lcrypto... no
- checking for HMAC_Init_ex in -lcrypto... no
- checking OpenSSL linking with -ldl... no
- checking OpenSSL linking with -ldl and -lpthread... no
- configure: WARNING: SSL disabled, you will not be able to use HTTPS, FTPS, NTLM and more.
- configure: WARNING: Use --with-ssl, --with-gnutls, --with-polarssl, --with-cyassl, --with-nss, --with-axtls, --with-winssl, or --with-darwinssl to address this.
- ...
- SSL support: no (--with-{ssl,gnutls,nss,polarssl,mbedtls,cyassl,axtls,winssl,darwinssl} )
- ...
-
- And include the other libraries when compiling SSL_LIBS succeeds with:
-
- checking for openssl options with pkg-config... found
- configure: pkg-config: SSL_LIBS: "-lssl -lz -ldl -pthread -lcrypto -lz -ldl -pthread "
- configure: pkg-config: SSL_LDFLAGS: "-L/home/jdknight/<workdir>/staging/usr/lib -L/home/jdknight/<workdir>/staging/usr/lib "
- configure: pkg-config: SSL_CPPFLAGS: "-I/home/jdknight/<workdir>/staging/usr/include "
- checking for HMAC_Update in -lcrypto... yes
- checking for SSL_connect in -lssl... yes
- ...
- SSL support: enabled (OpenSSL)
- ...
-
- Signed-off-by: James Knight <james.d.knight@live.com>
- Closes #3193
-
-Daniel Gustafsson (26 Nov 2018)
-- doh: fix typo in infof call
-
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-- cmdline-opts/gen.pl: define the correct varname
-
- The variable definition had a small typo, making it declare another
- variable than the one intended.
-
- Closes #3304
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (25 Nov 2018)
-- RELEASE-NOTES: synced
-
-- curl_easy_perform: fix timeout handling
-
- curl_multi_wait() was erroneously used from within
- curl_easy_perform(). It could lead to it believing there was no socket
- to wait for, making it sleep for a while instead of monitoring the
- socket and then miss acting on that activity as swiftly as it should
- (causing an up to 1000 ms delay).
-
- Reported-by: Antoni Villalonga
- Fixes #3305
- Closes #3306
- Closes #3308
-
-- CURLOPT_WRITEFUNCTION.3: spell out that it gets called many times
-
-- cookies: create the cookiejar even if no cookies to save
-
- Important for when the file is going to be read again and thus must not
- contain old contents!
-
- Adds test 327 to verify.
-
- Reported-by: daboul on github
- Fixes #3299
- Closes #3300
-
-- checksrc: ban snprintf use, add command line flag to override warns
-
-- snprintf: renamed and we now only use msnprintf()
-
- The function does not return the same value as snprintf() normally does,
- so readers may be misled into thinking the code works differently than
- it actually does. A different function name makes this easier to detect.
-
- Reported-by: Tomas Hoger
- Assisted-by: Daniel Gustafsson
- Fixes #3296
- Closes #3297
-
-- [Tobias Hintze brought this change]
-
- test: update test20/1322 for eglibc bug workaround
-
- The tests 20 and 1322 are using getaddrinfo of libc for resolving. In
- eglibc-2.19 there is a memory leak and invalid free bug which
- surfaces in some special circumstances (PF_UNSPEC hint with invalid or
- non-existent names). The valgrind runs in testing fail in these
- situations.
-
- As the tests 20/1322 are not specific on either protocol (IPv4/IPv6)
- this commit changes the hints to IPv4 protocol by passing `--ipv4` flag
- on the tests' command line. This prevents the valgrind failures.
-
-- [Tobias Hintze brought this change]
-
- host names: allow trailing dot in name resolve, then strip it
-
- Delays stripping of trailing dots to after resolving the hostname.
-
- Fixes #3022
- Closes #3222
-
-- [UnknownShadow200 brought this change]
-
- CURLOPT_HEADERFUNCTION.3: match 'nitems' name in synopsis and description
-
- Closes #3295
-
-Daniel Gustafsson (21 Nov 2018)
-- configure: Fix typo in comment
-
-Michael Kaufmann (21 Nov 2018)
-- openssl: support session resume with TLS 1.3
-
- Session resumption information is not available immediately after a TLS 1.3
- handshake. The client must wait until the server has sent a session ticket.
-
- Use OpenSSL's "new session" callback to get the session information and put it
- into curl's session cache. For TLS 1.3 sessions, this callback will be invoked
- after the server has sent a session ticket.
-
- The "new session" callback is invoked only if OpenSSL's session cache is
- enabled, so enable it and use the "external storage" mode which lets curl manage
- the contents of the session cache.
-
- A pointer to the connection data and the sockindex are now saved as "SSL extra
- data" to make them available to the callback.
-
- This approach also works for old SSL/TLS versions and old OpenSSL versions.
-
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
- Fixes #3202
- Closes #3271
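-
- A bare-bones sketch of the OpenSSL calls involved (the application's
- session cache handling is left out):
-
-   #include <openssl/ssl.h>
-
-   /* invoked by OpenSSL once a (possibly post-handshake TLS 1.3)
-      session ticket has arrived */
-   static int new_session_cb(SSL *ssl, SSL_SESSION *sess)
-   {
-     (void)ssl;
-     (void)sess;  /* store the session in the application cache here */
-     return 0;    /* 0 = we did not take ownership of the session */
-   }
-
-   static void enable_external_session_cache(SSL_CTX *ctx)
-   {
-     SSL_CTX_set_session_cache_mode(ctx,
-         SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL);
-     SSL_CTX_sess_set_new_cb(ctx, new_session_cb);
-   }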
-
-- ssl: fix compilation with OpenSSL 0.9.7
-
- - ENGINE_cleanup() was used without including "openssl/engine.h"
- - enable engine support for OpenSSL 0.9.7
-
- Closes #3266
-
-Daniel Stenberg (21 Nov 2018)
-- openssl: disable TLS renegotiation with BoringSSL
-
- Since we're close to feature freeze, this change disables this feature
- with an #ifdef. Define ALLOW_RENEG at build-time to enable.
-
- This could be converted to a bit for CURLOPT_SSL_OPTIONS to let
- applications opt-in this.
-
- Concern-raised-by: David Benjamin
- Fixes #3283
- Closes #3293
-
-- [Romain Fliedel brought this change]
-
- ares: remove fd from multi fd set when ares is about to close the fd
-
- When using c-ares for asyn dns, the dns socket fd was silently closed
- by c-ares without curl being aware. curl would then 'realize' the fd
- had been removed at the next call of Curl_resolver_getsock, and only then
- notify the CURLMOPT_SOCKETFUNCTION to remove the fd from its poll set with
- CURL_POLL_REMOVE. At this point the fd is already closed.
-
- By using the ares socket state callback (ARES_OPT_SOCK_STATE_CB), this
- patch allows curl to be notified that the fd is no longer needed for
- either write or read. At this point, by calling
- Curl_multi_closed we are able to notify multi with CURL_POLL_REMOVE
- before the fd is actually closed by ares.
-
- In asyn-ares.c Curl_resolver_duphandle we can't use ares_dup anymore
- since it does not allow passing a different sock_state_cb_data
-
- Closes #3238
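-
- A rough sketch of wiring up the callback (the event-loop notification
- itself is only indicated by a comment):
-
-   #include <string.h>
-   #include <ares.h>
-
-   /* c-ares calls this before it closes a socket it no longer needs */
-   static void sock_state_cb(void *data, ares_socket_t fd,
-                             int readable, int writable)
-   {
-     (void)data;
-     (void)fd;
-     if(!readable && !writable) {
-       /* stop monitoring fd here, before c-ares closes it */
-     }
-   }
-
-   static int init_channel(ares_channel *channel)
-   {
-     struct ares_options opts;
-     memset(&opts, 0, sizeof(opts));
-     opts.sock_state_cb = sock_state_cb;
-     opts.sock_state_cb_data = NULL;
-     return ares_init_options(channel, &opts, ARES_OPT_SOCK_STATE_CB);
-   }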
-
-- [Romain Fliedel brought this change]
-
- examples/ephiperfifo: report error when epoll_ctl fails
-
-Daniel Gustafsson (20 Nov 2018)
-- [pkubaj brought this change]
-
- ntlm: Remove redundant ifdef USE_OPENSSL
-
- lib/curl_ntlm.c had code that read as follows:
-
- #ifdef USE_OPENSSL
- # ifdef USE_OPENSSL
- # else
- # ..
- # endif
- #endif
-
- Remove the redundant USE_OPENSSL along with #else (it's not possible to
- reach it anyway). The removed construction is a leftover from when the
- SSLeay support was removed.
-
- Closes #3269
- Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (20 Nov 2018)
-- [Han Han brought this change]
-
- ssl: replace all internal uses of CURLE_SSL_CACERT
-
- Closes #3291
-
-Han Han (19 Nov 2018)
-- docs: add more description to unified ssl error codes
-
-- curle: move deprecated error code to ifndef block
-
-Patrick Monnerat (19 Nov 2018)
-- os400: add CURLOPT_CURLU to ILE/RPG binding.
-
-- os400: Add curl_easy_conn_upkeep() to ILE/RPG binding.
-
-- os400: fix return type of curl_easy_pause() in ILE/RPG binding.
-
-Daniel Stenberg (19 Nov 2018)
-- RELEASE-NOTES: synced
-
-- impacket: add LICENSE
-
- The license for the impacket package was not in our tree.
-
- Imported now from upstream's
- https://github.com/SecureAuthCorp/impacket/blob/master/LICENSE
-
- Reported-by: infinnovation-dev on github
- Fixes #3276
- Closes #3277
-
-Daniel Gustafsson (18 Nov 2018)
-- tool_doswin: Fix uninitialized field warning
-
- The partial struct initialization in 397664a065abffb7c3445ca9 caused
- a warning on uninitialized MODULEENTRY32 struct members:
-
- /src/tool_doswin.c:681:3: warning: missing initializer for field
- 'th32ModuleID' of 'MODULEENTRY32 {aka struct tagMODULEENTRY32}'
- [-Wmissing-field-initializers]
-
- This is sort of a bogus warning as the remaining members will be set
- to zero by the compiler, as all omitted members are. Nevertheless,
- remove the warning by omitting all members and setting the dwSize
- member explicitly.
-
- Closes #3254
- Reviewed-by: Marcel Raad <Marcel.Raad@teamviewer.com>
- Reviewed-by: Jay Satiro <raysatiro@yahoo.com>
-
-- openssl: Remove SSLEAY leftovers
-
- Commit 709cf76f6bb7dbac deprecated USE_SSLEAY, as curl since long isn't
- compatible with the SSLeay library. This removes the few leftovers that
- were omitted in the less frequently used platform targets.
-
- Closes #3270
- Reviewed-by: Daniel Stenberg <daniel@haxx.se>
-
-Daniel Stenberg (16 Nov 2018)
-- [Elia Tufarolo brought this change]
-
- http_negotiate: do not close connection until negotiation is completed
-
- Fix HTTP POST using CURLAUTH_NEGOTIATE.
-
- Closes #3275
-
-- pop3: only do APOP with a valid timestamp
-
- Brought-by: bobmitchell1956 on github
- Fixes #3278
- Closes #3279
-
-Jay Satiro (16 Nov 2018)
-- [Peter Wu brought this change]
-
- openssl: do not log excess "TLS app data" lines for TLS 1.3
-
- The SSL_CTX_set_msg_callback callback is not just called for the
- Handshake or Alert protocols, but also for the raw record header
- (SSL3_RT_HEADER) and the decrypted inner record type
- (SSL3_RT_INNER_CONTENT_TYPE). Be sure to ignore the latter to avoid
- excess debug spam when using `curl -v` against a TLSv1.3-enabled server:
-
- * TLSv1.3 (IN), TLS app data, [no content] (0):
-
- (Following this message, another callback for the decrypted
- handshake/alert messages will be present anyway.)
-
- Closes https://github.com/curl/curl/pull/3281
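-
- A sketch of the filtering inside an SSL_CTX_set_msg_callback() callback
- (assumes OpenSSL 1.1.1 headers for SSL3_RT_INNER_CONTENT_TYPE; the
- actual logging is omitted):
-
-   #include <openssl/ssl.h>
-
-   static void msg_cb(int write_p, int version, int content_type,
-                      const void *buf, size_t len, SSL *ssl, void *arg)
-   {
-     (void)write_p; (void)version; (void)buf; (void)len; (void)ssl; (void)arg;
-     if(content_type == SSL3_RT_HEADER ||
-        content_type == SSL3_RT_INNER_CONTENT_TYPE)
-       return;           /* record header / decrypted inner type: ignore */
-     /* log the handshake or alert message here */
-   }
-
-   static void install_cb(SSL_CTX *ctx)
-   {
-     SSL_CTX_set_msg_callback(ctx, msg_cb);
-   }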
-
-Marc Hoersken (15 Nov 2018)
-- tests: disable SO_EXCLUSIVEADDRUSE for stunnel on Windows
-
- SO_EXCLUSIVEADDRUSE is on by default on Vista or newer,
- but does not work together with SO_REUSEADDR being on.
-
- The default changes were made with stunnel 5.34 and 5.35.
-
-Daniel Stenberg (13 Nov 2018)
-- [Kamil Dudka brought this change]
-
- nss: remove version selecting dead code
-
- Closes #3262
-
-- nss: set default max-tls to 1.3/1.2
-
- Fixes #3261
-
-Daniel Gustafsson (13 Nov 2018)
-- tool_cb_wrt: Silence function cast compiler warning
-
- Commit 5bfaa86ceb3c2a9ac474a928e748c4a86a703b33 introduced a new
- compiler warning on Windows cross compilation with GCC. See below
- for an example of the warning from the autobuild logs (whitespace
- edited to fit):
-
- /src/tool_cb_wrt.c:175:9: warning: cast from function call of type
- 'intptr_t {aka long long int}' to non-matching type 'void *'
- [-Wbad-function-cast]
- (HANDLE) _get_osfhandle(fileno(outs->stream)),
- ^
-
- Store the return value from _get_osfhandle() in an intermediate
- variable and cast the variable in WriteConsoleW() rather than the
- function call directly to avoid a compiler warning.
-
- In passing, also add inspection of the MultiByteToWideChar() return
- value and return failure in case an error is reported.
-
- Closes #3263
- Reviewed-by: Marcel Raad <Marcel.Raad@teamviewer.com>
- Reviewed-by: Viktor Szakats <commit@vszakats.net>
-
-Daniel Stenberg (12 Nov 2018)
-- nss: fix fallthrough comment to fix picky compiler warning
-
-- docs: expanded on some CURLU details
-
-- [Tim Rühsen brought this change]
-
- ftp: avoid two unsigned int overflows in FTP listing parser
-
- Curl_ftp_parselist: avoid unsigned integer overflows
-
- The overflow has no real world impact, just avoid it for "best
- practice".
-
- Closes #3225
diff --git a/libs/libcurl/docs/THANKS b/libs/libcurl/docs/THANKS
index 73b84cfdb3..884906ae26 100644
--- a/libs/libcurl/docs/THANKS
+++ b/libs/libcurl/docs/THANKS
@@ -51,6 +51,7 @@ Alex Chan
Alex Fishman
Alex Grebenschikov
Alex Gruz
+Alex Konev
Alex Malinovich
Alex Mayorga
Alex McLellan
@@ -58,6 +59,7 @@ Alex Neblett
Alex Nichols
Alex Potapenko
Alex Rousskov
+Alex Samorukov
Alex Suykov
Alex Vinnik
Alex aka WindEagle
@@ -116,6 +118,7 @@ Andrei Karas
Andrei Kurushin
Andrei Neculau
Andrei Sedoi
+Andrei Valeriu BICA
Andrei Virtosu
Andrej E Baranov
Andrew Benham
@@ -177,9 +180,11 @@ Balaji Salunke
Balazs Kovacsics
Balint Szilakszi
Barry Abrahamson
+Barry Pollard
Bart Whiteley
Bas Mevissen
Bas van Schaik
+Bastien Bouclet
Basuke Suzuki
Ben Boeckel
Ben Darnell
@@ -257,6 +262,7 @@ Bruno Thomsen
Bruno de Carvalho
Bryan Henderson
Bryan Kemp
+Bylon2 on github
Byrial Jensen
Caleb Raitto
Cameron Kaiser
@@ -304,7 +310,9 @@ Christian Schmitz
Christian Stewart
Christian Vogt
Christian Weisgerber
+Christoph M. Becker
Christophe Demory
+Christophe Dervieux
Christophe Legry
Christopher Conroy
Christopher Head
@@ -382,6 +390,7 @@ Daniel Romero
Daniel Schauenberg
Daniel Seither
Daniel Shahaf
+Daniel Silverstone
Daniel Steinberg
Daniel Stenberg
Daniel Theron
@@ -436,6 +445,7 @@ David Woodhouse
David Wright
David Yan
Dengminwen
+Denis Chaplygin
Denis Feklushkin
Denis Ollier
Dennis Clarke
@@ -520,6 +530,7 @@ Elliot Saba
Ellis Pritchard
Elmira A Semenova
Emanuele Bovisio
+Emil Engler
Emil Lerner
Emil Romanus
Emiliano Ida
@@ -589,6 +600,7 @@ Forrest Cahoon
Francisco Moraes
Francisco Sedano
Francois Petitjean
+Francois Rivard
Frank Denis
Frank Gevaerts
Frank Hempel
@@ -622,6 +634,7 @@ Georg Horn
Georg Huettenegger
Georg Lippitsch
Georg Wicherski
+George Liu
Gerd v. Egidy
Gergely Nagy
Gerhard Herre
@@ -633,6 +646,7 @@ Gil Weber
Gilad
Gilbert Ramirez Jr.
Gilles Blanc
+Gilles Vollant
Giorgos Oikonomou
Gisle Vanem
GitYuanQu on github
@@ -657,6 +671,7 @@ Greg Rowe
Greg Zavertnik
Gregory Nicholls
Gregory Szorc
+Griffin Downs
Grigory Entin
Guenole Bescon
Guido Berhoerster
@@ -727,6 +742,7 @@ Ihor Karpenko
Iida Yosiaki
Ilguiz Latypov
Ilja van Sprundel
+Ilya Kosarev
Immanuel Gregoire
Inca R
Ingmar Runge
@@ -744,6 +760,7 @@ Ivo Bellin Salarin
Jack Zhang
Jackarain on github
Jacky Lam
+Jacob Barthelmeh
Jacob Meuser
Jacob Moshenko
Jactry Zeng
@@ -813,6 +830,7 @@ Jeff Phillips
Jeff Pohlmeyer
Jeff Weber
Jeffrey Walton
+Jens Finkhaeuser
Jens Rantil
Jens Schleusener
Jeremie Rapin
@@ -840,6 +858,7 @@ Jim Freeman
Jim Fuller
Jim Hollinger
Jim Meyering
+Jimmy Gaussen
Jiri Dvorak
Jiri Hruska
Jiri Jaburek
@@ -890,6 +909,7 @@ John Weismiller
John Wilkinson
John-Mark Bell
Johnny Luong
+Jojojov on github
Jon DeVree
Jon Grubbs
Jon Nelson
@@ -1070,6 +1090,7 @@ Luca Altea
Luca Boccassi
Lucas Adamski
Lucas Pardue
+Lucas Severo
Ludek Finstrle
Ludovico Cavedon
Ludwig Nussel
@@ -1107,6 +1128,7 @@ Marc Kleine-Budde
Marc Renault
Marc Schlatter
Marc-Antoine Perennou
+Marcel Hernandez
Marcel Raad
Marcel Roelofs
Marcelo Echeverria
@@ -1151,6 +1173,7 @@ Martin Drasar
Martin Dreher
Martin Frodl
Martin Galvan
+Martin Gartner
Martin Hager
Martin Hedenfalk
Martin Jansen
@@ -1283,6 +1306,7 @@ Nate Prewitt
Nathan Coulter
Nathan O'Sullivan
Nathanael Nerode
+Nathaniel J. Smith
Nathaniel Waisbrot
Naveen Chandran
Naveen Noel
@@ -1292,6 +1316,7 @@ Neil Bowers
Neil Dunbar
Neil Kolban
Neil Spring
+Niall O'Reilly
Nic Roets
Nicholas Maniscalco
Nick Draffen
@@ -1370,7 +1395,9 @@ Patrick Smith
Patrick Watson
Patrik Thunstrom
Pau Garcia i Quiles
+Paul B. Omta
Paul Donohue
+Paul Dreik
Paul Groke
Paul Harrington
Paul Harris
@@ -1415,6 +1442,7 @@ Peter Piekarski
Peter Silva
Peter Simonyi
Peter Su
+Peter Sumatra
Peter Sylvester
Peter Todd
Peter Varga
@@ -1438,6 +1466,7 @@ Philip Langdale
Philip Prindeville
Philipp Waehnert
Philippe Hameau
+Philippe Marguinaud
Philippe Raoult
Philippe Vaucher
Pierre
@@ -1446,6 +1475,7 @@ Pierre Chapuis
Pierre Joye
Pierre Ynard
Piotr Dobrogost
+Piotr Komborski
Po-Chuan Hsieh
Pooyan McSporran
Poul T Lomholt
@@ -1563,6 +1593,7 @@ Rodric Glaser
Rodrigo Silva
Roger Leigh
Roland Blom
+Roland Hieber
Roland Krikava
Roland Zimmermann
Rolf Eike Beer
@@ -1626,6 +1657,7 @@ Sean Burford
Sean MacLennan
Sean Miller
Sebastiaan van Erk
+Sebastian Haglund
Sebastian Mundry
Sebastian Pohlschmidt
Sebastian Rasmussen
@@ -1669,6 +1701,7 @@ Somnath Kundu
Song Ma
Sonia Subramanian
Spacen Jasset
+Spezifant on github
Spiridonoff A.V
Spork Schivago
Stadler Stephan
@@ -1714,8 +1747,10 @@ Steven G. Johnson
Steven Gu
Steven M. Schweda
Steven Parkes
+Stian Soiland-Reyes
Stoned Elipot
Stuart Henderson
+SumatraPeter on github
Sune Ahlgren
Sunny Purushe
Sven Anders
@@ -1827,6 +1862,7 @@ Toshiyuki Maezawa
Traian Nicolescu
Travis Burtrum
Travis Obenhaus
+Trivikram Kamat
Troels Walsted Hansen
Troy Engel
Tseng Jun
@@ -1840,6 +1876,7 @@ Ulrich Doehner
Ulrich Telle
Ulrich Zadow
Valentin David
+Valerii Zapodovnikov
Vasiliy Faronov
Vasily Lobaskin
Vasy Okhin
@@ -1850,6 +1887,7 @@ Victor Snezhko
Vijay Panghal
Vikram Saxena
Viktor Szakats
+Vilhelm Prytz
Ville Skyttä
Vilmos Nebehaj
Vincas Razma
@@ -1899,6 +1937,7 @@ Yang Tse
Yarram Sunil
Yasuharu Yamada
Yasuhiro Matsumoto
+Yechiel Kalmenson
Yehezkel Horowitz
Yehoshua Hershberg
Yi Huang
@@ -1966,6 +2005,7 @@ jonrumsey on github
joshhe on github
jungle-boogie on github
jveazey on github
+jzinn on github
ka7 on github
kreshano on github
l00p3r on Hackerone
@@ -1977,6 +2017,7 @@ masbug on github
mccormickt12 on github
migueljcrum on github
mkzero on github
+momala454 on github
moohoorama on github
nedres on github
neex on github
@@ -1984,6 +2025,7 @@ neheb on github
nevv on HackerOne/curl
niallor on github
nianxuejie on github
+nico-abram on github
niner on github
nk
nopjmp on github
diff --git a/libs/libcurl/include/curl/curl.h b/libs/libcurl/include/curl/curl.h
index ff0c774962..dcbe8995cb 100644
--- a/libs/libcurl/include/curl/curl.h
+++ b/libs/libcurl/include/curl/curl.h
@@ -2800,6 +2800,8 @@ typedef struct {
#define CURL_VERSION_ALTSVC (1<<24) /* Alt-Svc handling built-in */
#define CURL_VERSION_HTTP3 (1<<25) /* HTTP3 support built-in */
+#define CURL_VERSION_ESNI (1<<26) /* ESNI support */
+
/*
* NAME curl_version_info()
*
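
CURL_VERSION_ESNI extends the run-time feature bits reported by curl_version_info(); the bit is only set in builds with ESNI support. A short usage sketch with the public API (assumes a libcurl new enough to define the symbol):

  #include <curl/curl.h>
  #include <stdio.h>

  int main(void)
  {
    curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
    /* feature bits are ORed into info->features */
    printf("ESNI built-in: %s\n",
           (info->features & CURL_VERSION_ESNI) ? "yes" : "no");
    return 0;
  }
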
diff --git a/libs/libcurl/include/curl/curlver.h b/libs/libcurl/include/curl/curlver.h
index f537b9205e..cab09eebda 100644
--- a/libs/libcurl/include/curl/curlver.h
+++ b/libs/libcurl/include/curl/curlver.h
@@ -30,12 +30,12 @@
/* This is the version number of the libcurl package from which this header
file origins: */
-#define LIBCURL_VERSION "7.66.0"
+#define LIBCURL_VERSION "7.67.0"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 7
-#define LIBCURL_VERSION_MINOR 66
+#define LIBCURL_VERSION_MINOR 67
#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
@@ -57,7 +57,7 @@
CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
-#define LIBCURL_VERSION_NUM 0x074200
+#define LIBCURL_VERSION_NUM 0x074300
/*
* This is the date and time when the full source package was created. The
@@ -68,7 +68,7 @@
*
* "2007-11-23"
*/
-#define LIBCURL_TIMESTAMP "2019-09-11"
+#define LIBCURL_TIMESTAMP "2019-11-06"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|(z))
#define CURL_AT_LEAST_VERSION(x,y,z) \
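
The bumped LIBCURL_VERSION_NUM matches the CURL_VERSION_BITS() formula: 7<<16 is 0x070000 and 67 decimal is 0x43, so 67<<8 is 0x4300, giving 0x074300 for 7.67.0. A hedged compile-time sketch (the guarded macro name is purely illustrative):

  #include <curl/curlver.h>

  /* 0x074300 == CURL_VERSION_BITS(7, 67, 0), since 67 == 0x43 */
  #if CURL_AT_LEAST_VERSION(7, 67, 0)
  #define APP_ASSUME_767_FEATURES 1   /* e.g. gate use of 7.67.0 additions */
  #endif
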
diff --git a/libs/libcurl/include/curl/multi.h b/libs/libcurl/include/curl/multi.h
index 02df0f3893..b392183954 100644
--- a/libs/libcurl/include/curl/multi.h
+++ b/libs/libcurl/include/curl/multi.h
@@ -396,6 +396,9 @@ typedef enum {
/* This is the argument passed to the server push callback */
CINIT(PUSHDATA, OBJECTPOINT, 15),
+ /* maximum number of concurrent streams to support on a connection */
+ CINIT(MAX_CONCURRENT_STREAMS, LONG, 16),
+
CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;
@@ -448,6 +451,9 @@ typedef int (*curl_push_callback)(CURL *parent,
struct curl_pushheaders *headers,
void *userp);
+/* value for MAXIMUM CONCURRENT STREAMS upper limit */
+#define INITIAL_MAX_CONCURRENT_STREAMS ((1U << 31) - 1)
+
#ifdef __cplusplus
} /* end of extern "C" */
#endif
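
CURLMOPT_MAX_CONCURRENT_STREAMS is a new multi option bounding how many concurrent streams libcurl will use on one connection, with INITIAL_MAX_CONCURRENT_STREAMS as the upper limit. A minimal usage sketch with the public multi API (the value 100L is just an example):

  #include <curl/curl.h>

  static CURLM *setup_multi(void)
  {
    CURLM *multi = curl_multi_init();
    if(multi)
      /* cap concurrent HTTP/2 / HTTP/3 streams per connection */
      curl_multi_setopt(multi, CURLMOPT_MAX_CONCURRENT_STREAMS, 100L);
    return multi;
  }
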
diff --git a/libs/libcurl/include/curl/urlapi.h b/libs/libcurl/include/curl/urlapi.h
index 0f2f152f1f..f2d06770dc 100644
--- a/libs/libcurl/include/curl/urlapi.h
+++ b/libs/libcurl/include/curl/urlapi.h
@@ -77,6 +77,8 @@ typedef enum {
#define CURLU_URLENCODE (1<<7) /* URL encode on set */
#define CURLU_APPENDQUERY (1<<8) /* append a form style part */
#define CURLU_GUESS_SCHEME (1<<9) /* legacy curl-style guessing */
+#define CURLU_NO_AUTHORITY (1<<10) /* Allow empty authority when the
+ scheme is unknown. */
typedef struct Curl_URL CURLU;
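
CURLU_NO_AUTHORITY relaxes the URL parser so a URL with an unknown scheme may lack an authority (host) part. A hedged sketch with the public curl_url API; the scheme "foo" is made up, and CURLU_NON_SUPPORT_SCHEME is combined with the new flag because the scheme is not one curl recognizes:

  #include <curl/curl.h>

  static CURLUcode parse_no_authority(void)
  {
    CURLUcode rc = CURLUE_OUT_OF_MEMORY;
    CURLU *u = curl_url();
    if(u) {
      /* "foo:/bar" has no host; without CURLU_NO_AUTHORITY this is rejected */
      rc = curl_url_set(u, CURLUPART_URL, "foo:/bar",
                        CURLU_NON_SUPPORT_SCHEME | CURLU_NO_AUTHORITY);
      curl_url_cleanup(u);
    }
    return rc;
  }
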
diff --git a/libs/libcurl/src/Makefile.in b/libs/libcurl/src/Makefile.in
index 28dfe93e43..27101a8eea 100644
--- a/libs/libcurl/src/Makefile.in
+++ b/libs/libcurl/src/Makefile.in
@@ -229,7 +229,7 @@ am__objects_1 = libcurl_la-file.lo libcurl_la-timeval.lo \
libcurl_la-curl_path.lo libcurl_la-curl_ctype.lo \
libcurl_la-curl_range.lo libcurl_la-psl.lo libcurl_la-doh.lo \
libcurl_la-urlapi.lo libcurl_la-curl_get_line.lo \
- libcurl_la-altsvc.lo
+ libcurl_la-altsvc.lo libcurl_la-socketpair.lo
am__dirstamp = $(am__leading_dot)dirstamp
am__objects_2 = vauth/libcurl_la-vauth.lo \
vauth/libcurl_la-cleartext.lo vauth/libcurl_la-cram.lo \
@@ -315,7 +315,8 @@ am__objects_9 = libcurlu_la-file.lo libcurlu_la-timeval.lo \
libcurlu_la-setopt.lo libcurlu_la-curl_path.lo \
libcurlu_la-curl_ctype.lo libcurlu_la-curl_range.lo \
libcurlu_la-psl.lo libcurlu_la-doh.lo libcurlu_la-urlapi.lo \
- libcurlu_la-curl_get_line.lo libcurlu_la-altsvc.lo
+ libcurlu_la-curl_get_line.lo libcurlu_la-altsvc.lo \
+ libcurlu_la-socketpair.lo
am__objects_10 = vauth/libcurlu_la-vauth.lo \
vauth/libcurlu_la-cleartext.lo vauth/libcurlu_la-cram.lo \
vauth/libcurlu_la-digest.lo vauth/libcurlu_la-digest_sspi.lo \
@@ -444,6 +445,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \
./$(DEPDIR)/libcurl_la-share.Plo \
./$(DEPDIR)/libcurl_la-slist.Plo \
./$(DEPDIR)/libcurl_la-smb.Plo ./$(DEPDIR)/libcurl_la-smtp.Plo \
+ ./$(DEPDIR)/libcurl_la-socketpair.Plo \
./$(DEPDIR)/libcurl_la-socks.Plo \
./$(DEPDIR)/libcurl_la-socks_gssapi.Plo \
./$(DEPDIR)/libcurl_la-socks_sspi.Plo \
@@ -554,6 +556,7 @@ am__depfiles_remade = ./$(DEPDIR)/libcurl_la-altsvc.Plo \
./$(DEPDIR)/libcurlu_la-slist.Plo \
./$(DEPDIR)/libcurlu_la-smb.Plo \
./$(DEPDIR)/libcurlu_la-smtp.Plo \
+ ./$(DEPDIR)/libcurlu_la-socketpair.Plo \
./$(DEPDIR)/libcurlu_la-socks.Plo \
./$(DEPDIR)/libcurlu_la-socks_gssapi.Plo \
./$(DEPDIR)/libcurlu_la-socks_sspi.Plo \
@@ -1000,7 +1003,7 @@ LIB_CFILES = file.c timeval.c base64.c hostip.c progress.c formdata.c \
curl_multibyte.c hostcheck.c conncache.c dotdot.c \
x509asn1.c http2.c smb.c curl_endian.c curl_des.c system_win32.c \
mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c \
- doh.c urlapi.c curl_get_line.c altsvc.c
+ doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c
LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
formdata.h cookie.h http.h sendf.h ftp.h url.h dict.h if2ip.h \
@@ -1021,7 +1024,7 @@ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
x509asn1.h http2.h sigpipe.h smb.h curl_endian.h curl_des.h \
curl_printf.h system_win32.h rand.h mime.h curl_sha256.h setopt.h \
curl_path.h curl_ctype.h curl_range.h psl.h doh.h urlapi-int.h \
- curl_get_line.h altsvc.h quic.h
+ curl_get_line.h altsvc.h quic.h socketpair.h
LIB_RCFILES = libcurl.rc
CSOURCES = $(LIB_CFILES) $(LIB_VAUTH_CFILES) $(LIB_VTLS_CFILES) \
@@ -1390,6 +1393,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-slist.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-smb.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-smtp.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-socketpair.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-socks.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-socks_gssapi.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurl_la-socks_sspi.Plo@am__quote@ # am--include-marker
@@ -1500,6 +1504,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-slist.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-smb.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-smtp.Plo@am__quote@ # am--include-marker
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-socketpair.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-socks.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-socks_gssapi.Plo@am__quote@ # am--include-marker
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcurlu_la-socks_sspi.Plo@am__quote@ # am--include-marker
@@ -2380,6 +2385,13 @@ libcurl_la-altsvc.lo: altsvc.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-altsvc.lo `test -f 'altsvc.c' || echo '$(srcdir)/'`altsvc.c
+libcurl_la-socketpair.lo: socketpair.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT libcurl_la-socketpair.lo -MD -MP -MF $(DEPDIR)/libcurl_la-socketpair.Tpo -c -o libcurl_la-socketpair.lo `test -f 'socketpair.c' || echo '$(srcdir)/'`socketpair.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurl_la-socketpair.Tpo $(DEPDIR)/libcurl_la-socketpair.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='socketpair.c' object='libcurl_la-socketpair.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -c -o libcurl_la-socketpair.lo `test -f 'socketpair.c' || echo '$(srcdir)/'`socketpair.c
+
vauth/libcurl_la-vauth.lo: vauth/vauth.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurl_la_CPPFLAGS) $(CPPFLAGS) $(libcurl_la_CFLAGS) $(CFLAGS) -MT vauth/libcurl_la-vauth.lo -MD -MP -MF vauth/$(DEPDIR)/libcurl_la-vauth.Tpo -c -o vauth/libcurl_la-vauth.lo `test -f 'vauth/vauth.c' || echo '$(srcdir)/'`vauth/vauth.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) vauth/$(DEPDIR)/libcurl_la-vauth.Tpo vauth/$(DEPDIR)/libcurl_la-vauth.Plo
@@ -3353,6 +3365,13 @@ libcurlu_la-altsvc.lo: altsvc.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-altsvc.lo `test -f 'altsvc.c' || echo '$(srcdir)/'`altsvc.c
+libcurlu_la-socketpair.lo: socketpair.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT libcurlu_la-socketpair.lo -MD -MP -MF $(DEPDIR)/libcurlu_la-socketpair.Tpo -c -o libcurlu_la-socketpair.lo `test -f 'socketpair.c' || echo '$(srcdir)/'`socketpair.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcurlu_la-socketpair.Tpo $(DEPDIR)/libcurlu_la-socketpair.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='socketpair.c' object='libcurlu_la-socketpair.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -c -o libcurlu_la-socketpair.lo `test -f 'socketpair.c' || echo '$(srcdir)/'`socketpair.c
+
vauth/libcurlu_la-vauth.lo: vauth/vauth.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcurlu_la_CPPFLAGS) $(CPPFLAGS) $(libcurlu_la_CFLAGS) $(CFLAGS) -MT vauth/libcurlu_la-vauth.lo -MD -MP -MF vauth/$(DEPDIR)/libcurlu_la-vauth.Tpo -c -o vauth/libcurlu_la-vauth.lo `test -f 'vauth/vauth.c' || echo '$(srcdir)/'`vauth/vauth.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) vauth/$(DEPDIR)/libcurlu_la-vauth.Tpo vauth/$(DEPDIR)/libcurlu_la-vauth.Plo
@@ -3792,6 +3811,7 @@ distclean: distclean-am
-rm -f ./$(DEPDIR)/libcurl_la-slist.Plo
-rm -f ./$(DEPDIR)/libcurl_la-smb.Plo
-rm -f ./$(DEPDIR)/libcurl_la-smtp.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-socketpair.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks_gssapi.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks_sspi.Plo
@@ -3902,6 +3922,7 @@ distclean: distclean-am
-rm -f ./$(DEPDIR)/libcurlu_la-slist.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-smb.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-smtp.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-socketpair.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks_gssapi.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks_sspi.Plo
@@ -4115,6 +4136,7 @@ maintainer-clean: maintainer-clean-am
-rm -f ./$(DEPDIR)/libcurl_la-slist.Plo
-rm -f ./$(DEPDIR)/libcurl_la-smb.Plo
-rm -f ./$(DEPDIR)/libcurl_la-smtp.Plo
+ -rm -f ./$(DEPDIR)/libcurl_la-socketpair.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks_gssapi.Plo
-rm -f ./$(DEPDIR)/libcurl_la-socks_sspi.Plo
@@ -4225,6 +4247,7 @@ maintainer-clean: maintainer-clean-am
-rm -f ./$(DEPDIR)/libcurlu_la-slist.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-smb.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-smtp.Plo
+ -rm -f ./$(DEPDIR)/libcurlu_la-socketpair.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks_gssapi.Plo
-rm -f ./$(DEPDIR)/libcurlu_la-socks_sspi.Plo
diff --git a/libs/libcurl/src/Makefile.inc b/libs/libcurl/src/Makefile.inc
index 3e3a385c59..72ef428ee6 100644
--- a/libs/libcurl/src/Makefile.inc
+++ b/libs/libcurl/src/Makefile.inc
@@ -61,7 +61,7 @@ LIB_CFILES = file.c timeval.c base64.c hostip.c progress.c formdata.c \
curl_multibyte.c hostcheck.c conncache.c dotdot.c \
x509asn1.c http2.c smb.c curl_endian.c curl_des.c system_win32.c \
mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c \
- doh.c urlapi.c curl_get_line.c altsvc.c
+ doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c
LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
formdata.h cookie.h http.h sendf.h ftp.h url.h dict.h if2ip.h \
@@ -82,7 +82,7 @@ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
x509asn1.h http2.h sigpipe.h smb.h curl_endian.h curl_des.h \
curl_printf.h system_win32.h rand.h mime.h curl_sha256.h setopt.h \
curl_path.h curl_ctype.h curl_range.h psl.h doh.h urlapi-int.h \
- curl_get_line.h altsvc.h quic.h
+ curl_get_line.h altsvc.h quic.h socketpair.h
LIB_RCFILES = libcurl.rc
diff --git a/libs/libcurl/src/Makefile.netware b/libs/libcurl/src/Makefile.netware
index 29885a19ea..a40534684d 100644
--- a/libs/libcurl/src/Makefile.netware
+++ b/libs/libcurl/src/Makefile.netware
@@ -640,8 +640,6 @@ ifdef WITH_SSL
@echo $(DL)#define HAVE_OPENSSL_ERR_H 1$(DL) >> $@
@echo $(DL)#define HAVE_OPENSSL_CRYPTO_H 1$(DL) >> $@
@echo $(DL)#define HAVE_OPENSSL_ENGINE_H 1$(DL) >> $@
- @echo $(DL)#define HAVE_LIBSSL 1$(DL) >> $@
- @echo $(DL)#define HAVE_LIBCRYPTO 1$(DL) >> $@
@echo $(DL)#define OPENSSL_NO_KRB5 1$(DL) >> $@
ifdef WITH_SRP
@echo $(DL)#define USE_TLS_SRP 1$(DL) >> $@
diff --git a/libs/libcurl/src/altsvc.c b/libs/libcurl/src/altsvc.c
index a649fefd80..64971a9f0f 100644
--- a/libs/libcurl/src/altsvc.c
+++ b/libs/libcurl/src/altsvc.c
@@ -54,8 +54,8 @@ static enum alpnid alpn2alpnid(char *name)
return ALPN_h1;
if(strcasecompare(name, "h2"))
return ALPN_h2;
-#if (defined(USE_QUICHE) || defined(USE_NGHTTP2)) && !defined(UNITTESTS)
- if(strcasecompare(name, "h3-22"))
+#if (defined(USE_QUICHE) || defined(USE_NGTCP2)) && !defined(UNITTESTS)
+ if(strcasecompare(name, "h3-23"))
return ALPN_h3;
#else
if(strcasecompare(name, "h3"))
@@ -73,8 +73,8 @@ const char *Curl_alpnid2str(enum alpnid id)
case ALPN_h2:
return "h2";
case ALPN_h3:
-#if (defined(USE_QUICHE) || defined(USE_NGHTTP2)) && !defined(UNITTESTS)
- return "h3-22";
+#if (defined(USE_QUICHE) || defined(USE_NGTCP2)) && !defined(UNITTESTS)
+ return "h3-23";
#else
return "h3";
#endif
@@ -442,6 +442,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
char option[32];
unsigned long num;
char *end_ptr;
+ bool quoted = FALSE;
semip++; /* pass the semicolon */
result = getalnum(&semip, option, sizeof(option));
if(result)
@@ -451,12 +452,21 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
if(*semip != '=')
continue;
semip++;
+ while(*semip && ISBLANK(*semip))
+ semip++;
+ if(*semip == '\"') {
+ /* quoted value */
+ semip++;
+ quoted = TRUE;
+ }
num = strtoul(semip, &end_ptr, 10);
- if(num < ULONG_MAX) {
+ if((end_ptr != semip) && num && (num < ULONG_MAX)) {
if(strcasecompare("ma", option))
maxage = num;
else if(strcasecompare("persist", option) && (num == 1))
persist = TRUE;
+ if(quoted && (*end_ptr == '\"'))
+ end_ptr++;
}
semip = end_ptr;
}
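
The altsvc.c hunk above makes the Alt-Svc parameter parser skip leading blanks, accept quoted values such as ma="86400", and ignore values with no digits. A standalone sketch of the same parsing idea, not curl internals (isblank() stands in for curl's ISBLANK macro):

  #include <ctype.h>
  #include <stdlib.h>

  /* parse a possibly quoted decimal parameter value, e.g. 86400 or "86400";
     returns 1 and stores the number on success, 0 otherwise */
  static int parse_param_num(const char *p, unsigned long *out)
  {
    char *end;
    int quoted = 0;
    while(*p && isblank((unsigned char)*p))
      p++;
    if(*p == '"') {
      quoted = 1;
      p++;
    }
    *out = strtoul(p, &end, 10);
    if(end == p)
      return 0;                 /* no digits at all */
    if(quoted && *end != '"')
      return 0;                 /* missing closing quote */
    return 1;
  }
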
diff --git a/libs/libcurl/src/asyn-thread.c b/libs/libcurl/src/asyn-thread.c
index 24da748850..8c552baa9a 100644
--- a/libs/libcurl/src/asyn-thread.c
+++ b/libs/libcurl/src/asyn-thread.c
@@ -21,6 +21,7 @@
***************************************************************************/
#include "curl_setup.h"
+#include "socketpair.h"
/***********************************************************************
* Only for threaded name resolves builds
@@ -74,6 +75,7 @@
#include "inet_ntop.h"
#include "curl_threads.h"
#include "connect.h"
+#include "socketpair.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
@@ -163,7 +165,7 @@ struct thread_sync_data {
char *hostname; /* hostname to resolve, Curl_async.hostname
duplicate */
int port;
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
struct connectdata *conn;
curl_socket_t sock_pair[2]; /* socket pair */
#endif
@@ -201,7 +203,7 @@ void destroy_thread_sync_data(struct thread_sync_data * tsd)
if(tsd->res)
Curl_freeaddrinfo(tsd->res);
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
/*
* close one end of the socket pair (may be done in resolver thread);
* the other end (for reading) is always closed in the parent thread.
@@ -243,9 +245,9 @@ int init_thread_sync_data(struct thread_data * td,
Curl_mutex_init(tsd->mtx);
-#ifdef HAVE_SOCKETPAIR
- /* create socket pair */
- if(socketpair(AF_LOCAL, SOCK_STREAM, 0, &tsd->sock_pair[0]) < 0) {
+#ifdef USE_SOCKETPAIR
+ /* create socket pair, avoid AF_LOCAL since it doesn't build on Solaris */
+ if(Curl_socketpair(AF_UNIX, SOCK_STREAM, 0, &tsd->sock_pair[0]) < 0) {
tsd->sock_pair[0] = CURL_SOCKET_BAD;
tsd->sock_pair[1] = CURL_SOCKET_BAD;
goto err_exit;
@@ -297,7 +299,7 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
struct thread_data *td = tsd->td;
char service[12];
int rc;
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
char buf[1];
#endif
@@ -322,11 +324,11 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
free(td);
}
else {
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
if(tsd->sock_pair[1] != CURL_SOCKET_BAD) {
/* DNS has been resolved, signal client task */
buf[0] = 1;
- if(write(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
+ if(swrite(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
/* update sock_erro to errno */
tsd->sock_error = SOCKERRNO;
}
@@ -382,7 +384,7 @@ static void destroy_async_data(struct Curl_async *async)
if(async->os_specific) {
struct thread_data *td = (struct thread_data*) async->os_specific;
int done;
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
curl_socket_t sock_rd = td->tsd.sock_pair[0];
struct connectdata *conn = td->tsd.conn;
#endif
@@ -407,7 +409,7 @@ static void destroy_async_data(struct Curl_async *async)
free(async->os_specific);
}
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
/*
* ensure CURLMOPT_SOCKETFUNCTION fires CURL_POLL_REMOVE
* before the FD is invalidated to avoid EBADF on EPOLL_CTL_DEL
@@ -647,13 +649,13 @@ int Curl_resolver_getsock(struct connectdata *conn,
timediff_t ms;
struct Curl_easy *data = conn->data;
struct resdata *reslv = (struct resdata *)data->state.resolver;
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
struct thread_data *td = (struct thread_data*)conn->async.os_specific;
#else
(void)socks;
#endif
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
if(td) {
/* return read fd to client for polling the DNS resolution status */
socks[0] = td->tsd.sock_pair[0];
@@ -673,7 +675,7 @@ int Curl_resolver_getsock(struct connectdata *conn,
else
milli = 200;
Curl_expire(data, milli, EXPIRE_ASYNC_NAME);
-#ifdef HAVE_SOCKETPAIR
+#ifdef USE_SOCKETPAIR
}
#endif
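
The asyn-thread.c changes let the resolver thread signal completion over a socket pair, so the owning transfer can poll a real descriptor instead of waking up on a timer; the new Curl_socketpair() in socketpair.c supplies the portable constructor, and AF_UNIX is used because AF_LOCAL does not build on Solaris. A hedged POSIX-only sketch of the wakeup pattern using plain socketpair(2):

  #include <sys/socket.h>
  #include <unistd.h>

  /* the parent polls sv[0] for readability; the worker owns sv[1] */
  static int make_signal_pair(int sv[2])
  {
    return socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
  }

  static void signal_done(int wr_fd)
  {
    char byte = 1;
    (void)write(wr_fd, &byte, 1);  /* makes the read end readable */
  }
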
diff --git a/libs/libcurl/src/checksrc.pl b/libs/libcurl/src/checksrc.pl
index 965f0bab1d..b2cfa83559 100644
--- a/libs/libcurl/src/checksrc.pl
+++ b/libs/libcurl/src/checksrc.pl
@@ -176,7 +176,7 @@ sub checkwarn {
$file = shift @ARGV;
-while(1) {
+while(defined $file) {
if($file =~ /-D(.*)/) {
$dir = $1;
diff --git a/libs/libcurl/src/config-amigaos.h b/libs/libcurl/src/config-amigaos.h
index 31cfc3afc2..12a87cf298 100644
--- a/libs/libcurl/src/config-amigaos.h
+++ b/libs/libcurl/src/config-amigaos.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -36,8 +36,6 @@
#define HAVE_INTTYPES_H 1
#define HAVE_IOCTLSOCKET_CAMEL 1
#define HAVE_IOCTLSOCKET_CAMEL_FIONBIO 1
-#define HAVE_LIBCRYPTO 1
-#define HAVE_LIBSSL 1
#define HAVE_LIBZ 1
#define HAVE_LONGLONG 1
#define HAVE_MALLOC_H 1
diff --git a/libs/libcurl/src/config-os400.h b/libs/libcurl/src/config-os400.h
index d14bd33917..a302828e26 100644
--- a/libs/libcurl/src/config-os400.h
+++ b/libs/libcurl/src/config-os400.h
@@ -160,9 +160,6 @@
/* Define if you have the <krb.h> header file. */
#undef HAVE_KRB_H
-/* Define if you have the `crypto' library (-lcrypto). */
-#undef HAVE_LIBCRYPTO
-
/* Define if you have the `nsl' library (-lnsl). */
#undef HAVE_LIBNSL
@@ -175,9 +172,6 @@
/* Define if you have the `socket' library (-lsocket). */
#undef HAVE_LIBSOCKET
-/* Define if you have the `ssl' library (-lssl). */
-#undef HAVE_LIBSSL
-
/* Define if you have GSS API. */
#define HAVE_GSSAPI
diff --git a/libs/libcurl/src/config-plan9.h b/libs/libcurl/src/config-plan9.h
index 70833a51dd..64bfbdea05 100644
--- a/libs/libcurl/src/config-plan9.h
+++ b/libs/libcurl/src/config-plan9.h
@@ -126,7 +126,6 @@
#define HAVE_INTTYPES_H 1
#define HAVE_IOCTL 1
#define HAVE_LIBGEN_H 1
-#define HAVE_LIBSSL 1
#define HAVE_LIBZ 1
#define HAVE_LL 1
#define HAVE_LOCALE_H 1
diff --git a/libs/libcurl/src/config-riscos.h b/libs/libcurl/src/config-riscos.h
index 0379524fb3..4af94981c7 100644
--- a/libs/libcurl/src/config-riscos.h
+++ b/libs/libcurl/src/config-riscos.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2013, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -164,9 +164,6 @@
/* Define if you have the <krb.h> header file. */
#undef HAVE_KRB_H
-/* Define if you have the `crypto' library (-lcrypto). */
-#undef HAVE_LIBCRYPTO
-
/* Define if you have the `nsl' library (-lnsl). */
#undef HAVE_LIBNSL
@@ -179,9 +176,6 @@
/* Define if you have the `socket' library (-lsocket). */
#undef HAVE_LIBSOCKET
-/* Define if you have the `ssl' library (-lssl). */
-#undef HAVE_LIBSSL
-
/* Define if you have the `ucb' library (-lucb). */
#undef HAVE_LIBUCB
diff --git a/libs/libcurl/src/config-symbian.h b/libs/libcurl/src/config-symbian.h
index b7b93c6f4c..cb2e96d5d1 100644
--- a/libs/libcurl/src/config-symbian.h
+++ b/libs/libcurl/src/config-symbian.h
@@ -315,9 +315,6 @@
/* Define to 1 if you have the <libssh2.h> header file. */
/*#define HAVE_LIBSSH2_H 1*/
-/* Define to 1 if you have the `ssl' library (-lssl). */
-/*#define HAVE_LIBSSL 1*/
-
/* if your compiler supports LL */
#define HAVE_LL 1
diff --git a/libs/libcurl/src/config-tpf.h b/libs/libcurl/src/config-tpf.h
index 778d9833fe..f0c095bb04 100644
--- a/libs/libcurl/src/config-tpf.h
+++ b/libs/libcurl/src/config-tpf.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -277,10 +277,6 @@
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef HAVE_LIBSOCKET */
-/* Define to 1 if you have the `ssl' library (-lssl). */
-/* #undef HAVE_LIBSSL */
-#define HAVE_LIBSSL 1
-
/* if zlib is available */
/* #undef HAVE_LIBZ */
diff --git a/libs/libcurl/src/config-vxworks.h b/libs/libcurl/src/config-vxworks.h
index 89af3525be..d352578e33 100644
--- a/libs/libcurl/src/config-vxworks.h
+++ b/libs/libcurl/src/config-vxworks.h
@@ -375,9 +375,6 @@
/* Define to 1 if you have the `libssh2_version' function. */
/* #undef HAVE_LIBSSH2_VERSION */
-/* Define to 1 if you have the `ssl' library (-lssl). */
-#define HAVE_LIBSSL 1
-
/* if zlib is available */
#define HAVE_LIBZ 1
diff --git a/libs/libcurl/src/config-win32.h b/libs/libcurl/src/config-win32.h
index 5b028f193f..90c1054765 100644
--- a/libs/libcurl/src/config-win32.h
+++ b/libs/libcurl/src/config-win32.h
@@ -246,10 +246,6 @@
/* Define if you have the socket function. */
#define HAVE_SOCKET 1
-/* Define if libSSH2 is in use */
-#define USE_LIBSSH2 1
-#define HAVE_LIBSSH2_H 1
-
/* Define if you have the strcasecmp function. */
/* #define HAVE_STRCASECMP 1 */
diff --git a/libs/libcurl/src/conncache.c b/libs/libcurl/src/conncache.c
index 2f4dd4bc3e..57d6061fda 100644
--- a/libs/libcurl/src/conncache.c
+++ b/libs/libcurl/src/conncache.c
@@ -143,10 +143,8 @@ int Curl_conncache_init(struct conncache *connc, int size)
rc = Curl_hash_init(&connc->hash, size, Curl_hash_str,
Curl_str_key_compare, free_bundle_hash_entry);
- if(rc) {
- Curl_close(connc->closure_handle);
- connc->closure_handle = NULL;
- }
+ if(rc)
+ Curl_close(&connc->closure_handle);
else
connc->closure_handle->state.conn_cache = connc;
@@ -595,7 +593,7 @@ void Curl_conncache_close_all_connections(struct conncache *connc)
Curl_hostcache_clean(connc->closure_handle,
connc->closure_handle->dns.hostcache);
- Curl_close(connc->closure_handle);
+ Curl_close(&connc->closure_handle);
sigpipe_restore(&pipe_st);
}
}
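
The conncache.c hunk (and several later ones) switch to a Curl_close() that takes the address of the handle pointer, so closing also nulls the caller's copy and the explicit "= NULL" lines disappear. A generic sketch of that pointer-to-pointer shape; free_and_clear and struct handle are illustrative, not curl symbols:

  #include <stdlib.h>

  struct handle { int dummy; };

  /* taking &ptr lets the callee free the object and clear the caller's
     pointer in one step, preventing reuse or double free */
  static void free_and_clear(struct handle **hp)
  {
    if(hp && *hp) {
      free(*hp);
      *hp = NULL;
    }
  }
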
diff --git a/libs/libcurl/src/connect.c b/libs/libcurl/src/connect.c
index 77196250de..3b88a59623 100644
--- a/libs/libcurl/src/connect.c
+++ b/libs/libcurl/src/connect.c
@@ -665,7 +665,7 @@ bool Curl_addr2string(struct sockaddr *sa, curl_socklen_t salen,
#endif
#if defined(HAVE_SYS_UN_H) && defined(AF_UNIX)
case AF_UNIX:
- if(salen > sizeof(sa_family_t)) {
+ if(salen > (curl_socklen_t)sizeof(sa_family_t)) {
su = (struct sockaddr_un*)sa;
msnprintf(addr, MAX_IPADR_LEN, "%s", su->sun_path);
}
@@ -976,6 +976,14 @@ CURLcode Curl_is_connected(struct connectdata *conn,
failf(data, "Failed to connect to %s port %ld: %s",
hostname, conn->port,
Curl_strerror(error, buffer, sizeof(buffer)));
+
+#ifdef WSAETIMEDOUT
+ if(WSAETIMEDOUT == data->state.os_errno)
+ result = CURLE_OPERATION_TIMEDOUT;
+#elif defined(ETIMEDOUT)
+ if(ETIMEDOUT == data->state.os_errno)
+ result = CURLE_OPERATION_TIMEDOUT;
+#endif
}
return result;
@@ -1508,6 +1516,11 @@ CURLcode Curl_socket(struct connectdata *conn,
/* no socket, no connection */
return CURLE_COULDNT_CONNECT;
+ if(conn->transport == TRNSPRT_QUIC) {
+ /* QUIC sockets need to be nonblocking */
+ (void)curlx_nonblock(*sockfd, TRUE);
+ }
+
#if defined(ENABLE_IPV6) && defined(HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID)
if(conn->scope_id && (addr->family == AF_INET6)) {
struct sockaddr_in6 * const sa6 = (void *)&addr->sa_addr;
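
The connect.c hunk maps the operating system's timeout error to CURLE_OPERATION_TIMEDOUT, checking WSAETIMEDOUT first so Windows is covered before the POSIX ETIMEDOUT fallback. A small sketch of the same #ifdef arrangement outside of curl (classify_connect_error is illustrative; the saved errno is assumed to come from the failed connect):

  #include <errno.h>
  #include <curl/curl.h>
  #ifdef _WIN32
  #include <winsock2.h>            /* WSAETIMEDOUT */
  #endif

  static CURLcode classify_connect_error(int os_errno)
  {
  #ifdef WSAETIMEDOUT
    if(os_errno == WSAETIMEDOUT)
      return CURLE_OPERATION_TIMEDOUT;
  #elif defined(ETIMEDOUT)
    if(os_errno == ETIMEDOUT)
      return CURLE_OPERATION_TIMEDOUT;
  #endif
    return CURLE_COULDNT_CONNECT;
  }
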
diff --git a/libs/libcurl/src/cookie.c b/libs/libcurl/src/cookie.c
index 53ca40237f..f56bd85a93 100644
--- a/libs/libcurl/src/cookie.c
+++ b/libs/libcurl/src/cookie.c
@@ -1090,6 +1090,8 @@ Curl_cookie_add(struct Curl_easy *data,
*
* If 'newsession' is TRUE, discard all "session cookies" on read from file.
*
+ * Note that 'data' might be called as NULL pointer.
+ *
* Returns NULL on out of memory. Invalid cookies are ignored.
****************************************************************************/
struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
@@ -1160,6 +1162,8 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
}
c->running = TRUE; /* now, we're running */
+ if(data)
+ data->state.cookie_engine = TRUE;
return c;
@@ -1528,28 +1532,28 @@ static int cookie_output(struct CookieInfo *c, const char *dumphere)
if(c->numcookies) {
unsigned int i;
- unsigned int j;
+ size_t nvalid = 0;
struct Cookie **array;
- array = malloc(sizeof(struct Cookie *) * c->numcookies);
+ array = calloc(1, sizeof(struct Cookie *) * c->numcookies);
if(!array) {
if(!use_stdout)
fclose(out);
return 1;
}
- j = 0;
+ /* only sort the cookies with a domain property */
for(i = 0; i < COOKIE_HASH_SIZE; i++) {
for(co = c->cookies[i]; co; co = co->next) {
if(!co->domain)
continue;
- array[j++] = co;
+ array[nvalid++] = co;
}
}
- qsort(array, c->numcookies, sizeof(struct Cookie *), cookie_sort_ct);
+ qsort(array, nvalid, sizeof(struct Cookie *), cookie_sort_ct);
- for(i = 0; i < j; i++) {
+ for(i = 0; i < nvalid; i++) {
char *format_ptr = get_netscape_format(array[i]);
if(format_ptr == NULL) {
fprintf(out, "#\n# Fatal libcurl error\n");
@@ -1613,7 +1617,7 @@ struct curl_slist *Curl_cookie_list(struct Curl_easy *data)
return list;
}
-void Curl_flush_cookies(struct Curl_easy *data, int cleanup)
+void Curl_flush_cookies(struct Curl_easy *data, bool cleanup)
{
if(data->set.str[STRING_COOKIEJAR]) {
if(data->change.cookielist) {
@@ -1642,6 +1646,7 @@ void Curl_flush_cookies(struct Curl_easy *data, int cleanup)
if(cleanup && (!data->share || (data->cookies != data->share->cookies))) {
Curl_cookie_cleanup(data->cookies);
+ data->cookies = NULL;
}
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
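
The cookie_output() hunk zero-initializes the pointer array and counts how many cookies actually have a domain, then sorts only that many entries; the old code sorted numcookies slots, which could include never-filled pointers. A generic sketch of the collect-then-sort-nvalid shape (names are illustrative):

  #include <stdlib.h>
  #include <string.h>

  static int cmp_str(const void *a, const void *b)
  {
    return strcmp(*(char *const *)a, *(char *const *)b);
  }

  /* copy only the non-NULL items, then sort just those; sorting the full
     capacity would hand qsort uninitialized or NULL pointers */
  static size_t collect_and_sort(char **dst, char **src, size_t capacity)
  {
    size_t i, nvalid = 0;
    for(i = 0; i < capacity; i++)
      if(src[i])
        dst[nvalid++] = src[i];
    qsort(dst, nvalid, sizeof(char *), cmp_str);
    return nvalid;
  }
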
diff --git a/libs/libcurl/src/cookie.h b/libs/libcurl/src/cookie.h
index b2730cfb91..b3865e601a 100644
--- a/libs/libcurl/src/cookie.h
+++ b/libs/libcurl/src/cookie.h
@@ -109,7 +109,7 @@ void Curl_cookie_clearsess(struct CookieInfo *cookies);
#define Curl_cookie_cleanup(x) Curl_nop_stmt
#define Curl_flush_cookies(x,y) Curl_nop_stmt
#else
-void Curl_flush_cookies(struct Curl_easy *data, int cleanup);
+void Curl_flush_cookies(struct Curl_easy *data, bool cleanup);
void Curl_cookie_cleanup(struct CookieInfo *);
struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
const char *, struct CookieInfo *, bool);
diff --git a/libs/libcurl/src/curl_config.h.cmake b/libs/libcurl/src/curl_config.h.cmake
index 5458cbaca7..e0793a7ee4 100644
--- a/libs/libcurl/src/curl_config.h.cmake
+++ b/libs/libcurl/src/curl_config.h.cmake
@@ -407,9 +407,6 @@
/* Define to 1 if you have the <libssh2.h> header file. */
#cmakedefine HAVE_LIBSSH2_H 1
-/* Define to 1 if you have the `ssl' library (-lssl). */
-#cmakedefine HAVE_LIBSSL 1
-
/* if zlib is available */
#cmakedefine HAVE_LIBZ 1
diff --git a/libs/libcurl/src/curl_config.h.in b/libs/libcurl/src/curl_config.h.in
index e5abe77b9e..32a87bd293 100644
--- a/libs/libcurl/src/curl_config.h.in
+++ b/libs/libcurl/src/curl_config.h.in
@@ -628,6 +628,9 @@
/* Define to 1 if you have the `SSLv2_client_method' function. */
#undef HAVE_SSLV2_CLIENT_METHOD
+/* Define to 1 if you have the `SSL_get_esni_status' function. */
+#undef HAVE_SSL_GET_ESNI_STATUS
+
/* Define to 1 if you have the <ssl.h> header file. */
#undef HAVE_SSL_H
@@ -952,6 +955,9 @@
/* Define to enable c-ares support */
#undef USE_ARES
+/* if ESNI support is available */
+#undef USE_ESNI
+
/* if GnuTLS is enabled */
#undef USE_GNUTLS
diff --git a/libs/libcurl/src/doh.c b/libs/libcurl/src/doh.c
index 6d1f3303b5..d1795789e5 100644
--- a/libs/libcurl/src/doh.c
+++ b/libs/libcurl/src/doh.c
@@ -74,17 +74,26 @@ static const char *doh_strerror(DOHcode code)
#define UNITTEST static
#endif
+/* @unittest 1655
+ */
UNITTEST DOHcode doh_encode(const char *host,
DNStype dnstype,
unsigned char *dnsp, /* buffer */
size_t len, /* buffer size */
size_t *olen) /* output length */
{
- size_t hostlen = strlen(host);
+ const size_t hostlen = strlen(host);
unsigned char *orig = dnsp;
const char *hostp = host;
- if(len < (12 + hostlen + 4))
+ /* The expected output length does not depend on the number of dots within
+ * the host name. It will always be two more than the length of the host
+ * name, one for the size and one trailing null. In case there are dots,
+ * each dot adds one size but removes the need to store the dot, net zero.
+ */
+ const size_t expected_len = 12 + ( 1 + hostlen + 1) + 4;
+
+ if(len < expected_len)
return DOH_TOO_SMALL_BUFFER;
*dnsp++ = 0; /* 16 bit id */
@@ -126,12 +135,18 @@ UNITTEST DOHcode doh_encode(const char *host,
}
} while(1);
- *dnsp++ = '\0'; /* upper 8 bit TYPE */
- *dnsp++ = (unsigned char)dnstype;
+ /* There are assigned TYPE codes beyond 255: use range [1..65535] */
+ *dnsp++ = (unsigned char)(255 & (dnstype>>8)); /* upper 8 bit TYPE */
+ *dnsp++ = (unsigned char)(255 & dnstype); /* lower 8 bit TYPE */
+
*dnsp++ = '\0'; /* upper 8 bit CLASS */
*dnsp++ = DNS_CLASS_IN; /* IN - "the Internet" */
*olen = dnsp - orig;
+
+ /* verify that our assumption of length is valid, since
+ * this has lead to buffer overflows in this function */
+ DEBUGASSERT(*olen == expected_len);
return DOH_OK;
}
@@ -225,7 +240,10 @@ static CURLcode dohprobe(struct Curl_easy *data,
}
timeout_ms = Curl_timeleft(data, NULL, TRUE);
-
+ if(timeout_ms <= 0) {
+ result = CURLE_OPERATION_TIMEDOUT;
+ goto error;
+ }
/* Curl_open() is the internal version of curl_easy_init() */
result = Curl_open(&doh);
if(!result) {
@@ -246,6 +264,9 @@ static CURLcode dohprobe(struct Curl_easy *data,
#ifndef CURLDEBUG
/* enforce HTTPS if not debug */
ERROR_CHECK_SETOPT(CURLOPT_PROTOCOLS, CURLPROTO_HTTPS);
+#else
+ /* in debug mode, also allow http */
+ ERROR_CHECK_SETOPT(CURLOPT_PROTOCOLS, CURLPROTO_HTTP|CURLPROTO_HTTPS);
#endif
ERROR_CHECK_SETOPT(CURLOPT_TIMEOUT_MS, (long)timeout_ms);
if(data->set.verbose)
@@ -325,7 +346,7 @@ static CURLcode dohprobe(struct Curl_easy *data,
error:
free(nurl);
- Curl_close(doh);
+ Curl_close(&doh);
return result;
}
@@ -381,10 +402,8 @@ Curl_addrinfo *Curl_doh(struct connectdata *conn,
error:
curl_slist_free_all(data->req.doh.headers);
data->req.doh.headers = NULL;
- curl_easy_cleanup(data->req.doh.probe[0].easy);
- data->req.doh.probe[0].easy = NULL;
- curl_easy_cleanup(data->req.doh.probe[1].easy);
- data->req.doh.probe[1].easy = NULL;
+ Curl_close(&data->req.doh.probe[0].easy);
+ Curl_close(&data->req.doh.probe[1].easy);
return NULL;
}
@@ -419,8 +438,14 @@ static unsigned short get16bit(unsigned char *doh, int index)
static unsigned int get32bit(unsigned char *doh, int index)
{
- return (doh[index] << 24) | (doh[index + 1] << 16) |
- (doh[index + 2] << 8) | doh[index + 3];
+ /* make clang and gcc optimize this to bswap by incrementing
+ the pointer first. */
+ doh += index;
+
+ /* avoid undefined behaviour by casting to unsigned before shifting
+ 24 bits, possibly into the sign bit. codegen is same, but
+ ub sanitizer won't be upset */
+ return ( (unsigned)doh[0] << 24) | (doh[1] << 16) |(doh[2] << 8) | doh[3];
}
static DOHcode store_a(unsigned char *doh, int index, struct dohentry *d)
@@ -898,17 +923,16 @@ CURLcode Curl_doh_is_resolved(struct connectdata *conn,
struct dohentry de;
/* remove DOH handles from multi handle and close them */
curl_multi_remove_handle(data->multi, data->req.doh.probe[0].easy);
- Curl_close(data->req.doh.probe[0].easy);
+ Curl_close(&data->req.doh.probe[0].easy);
curl_multi_remove_handle(data->multi, data->req.doh.probe[1].easy);
- Curl_close(data->req.doh.probe[1].easy);
-
+ Curl_close(&data->req.doh.probe[1].easy);
/* parse the responses, create the struct and return it! */
init_dohentry(&de);
rc = doh_decode(data->req.doh.probe[0].serverdoh.memory,
data->req.doh.probe[0].serverdoh.size,
data->req.doh.probe[0].dnstype,
&de);
- free(data->req.doh.probe[0].serverdoh.memory);
+ Curl_safefree(data->req.doh.probe[0].serverdoh.memory);
if(rc) {
infof(data, "DOH: %s type %s for %s\n", doh_strerror(rc),
type2name(data->req.doh.probe[0].dnstype),
@@ -918,7 +942,7 @@ CURLcode Curl_doh_is_resolved(struct connectdata *conn,
data->req.doh.probe[1].serverdoh.size,
data->req.doh.probe[1].dnstype,
&de);
- free(data->req.doh.probe[1].serverdoh.memory);
+ Curl_safefree(data->req.doh.probe[1].serverdoh.memory);
if(rc2) {
infof(data, "DOH: %s type %s for %s\n", doh_strerror(rc2),
type2name(data->req.doh.probe[1].dnstype),
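
Two details from the doh.c hunks above: the required query buffer size is 12 header bytes plus (1 + hostlen + 1) for the QNAME plus 4 for QTYPE/QCLASS, e.g. 12 + 13 + 4 = 29 bytes for "example.com" (hostlen 11), and the 32-bit read casts to unsigned before shifting by 24 so the top bit never lands in a signed int's sign bit. A hedged standalone version of that read (read_be32 is an illustrative name):

  /* big-endian 32-bit read; the unsigned casts keep every shift defined,
     and compilers still reduce this to a single load/bswap */
  static unsigned int read_be32(const unsigned char *p)
  {
    return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
           ((unsigned int)p[2] << 8)  |  (unsigned int)p[3];
  }
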
diff --git a/libs/libcurl/src/easy.c b/libs/libcurl/src/easy.c
index 0b0016be47..001648d49b 100644
--- a/libs/libcurl/src/easy.c
+++ b/libs/libcurl/src/easy.c
@@ -731,7 +731,7 @@ void curl_easy_cleanup(struct Curl_easy *data)
return;
sigpipe_ignore(data, &pipe_st);
- Curl_close(data);
+ Curl_close(&data);
sigpipe_restore(&pipe_st);
}
@@ -1020,9 +1020,8 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action)
/* if there's no error and we're not pausing both directions, we want
to have this handle checked soon */
- if(!result &&
- ((newstate&(KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) !=
- (KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) ) {
+ if((newstate & (KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) !=
+ (KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) {
Curl_expire(data, 0, EXPIRE_RUN_NOW); /* get this handle going again */
if(data->multi)
Curl_update_timer(data->multi);
diff --git a/libs/libcurl/src/ftp.c b/libs/libcurl/src/ftp.c
index e807a2acde..8072a33d5d 100644
--- a/libs/libcurl/src/ftp.c
+++ b/libs/libcurl/src/ftp.c
@@ -523,7 +523,7 @@ static CURLcode AllowServerConnect(struct connectdata *conn, bool *connected)
}
else {
/* Add timeout to multi handle and break out of the loop */
- if(!result && *connected == FALSE) {
+ if(*connected == FALSE) {
Curl_expire(data, data->set.accepttimeout > 0 ?
data->set.accepttimeout: DEFAULT_ACCEPT_TIMEOUT, 0);
}
@@ -867,6 +867,10 @@ static CURLcode ftp_state_cwd(struct connectdata *conn)
/* already done and fine */
result = ftp_state_mdtm(conn);
else {
+ /* FTPFILE_NOCWD with full path: expect ftpc->cwddone! */
+ DEBUGASSERT((conn->data->set.ftp_filemethod != FTPFILE_NOCWD) ||
+ !(ftpc->dirdepth && ftpc->dirs[0][0] == '/'));
+
ftpc->count2 = 0; /* count2 counts failed CWDs */
/* count3 is set to allow a MKD to fail once. In the case when first CWD
@@ -874,10 +878,9 @@ static CURLcode ftp_state_cwd(struct connectdata *conn)
dir) this then allows for a second try to CWD to it */
ftpc->count3 = (conn->data->set.ftp_create_missing_dirs == 2)?1:0;
- if((conn->data->set.ftp_filemethod == FTPFILE_NOCWD) && !ftpc->cwdcount)
- /* No CWD necessary */
- result = ftp_state_mdtm(conn);
- else if(conn->bits.reuse && ftpc->entrypath) {
+ if(conn->bits.reuse && ftpc->entrypath &&
+ /* no need to go to entrypath when we have an absolute path */
+ !(ftpc->dirdepth && ftpc->dirs[0][0] == '/')) {
/* This is a re-used connection. Since we change directory to where the
transfer is taking place, we must first get back to the original dir
where we ended up after login: */
@@ -1436,31 +1439,37 @@ static CURLcode ftp_state_list(struct connectdata *conn)
servers either... */
/*
- if FTPFILE_NOCWD was specified, we are currently in
- the user's home directory, so we should add the path
+ if FTPFILE_NOCWD was specified, we should add the path
as argument for the LIST / NLST / or custom command.
Whether the server will support this, is uncertain.
The other ftp_filemethods will CWD into dir/dir/ first and
then just do LIST (in that case: nothing to do here)
*/
- char *cmd, *lstArg, *slashPos;
- const char *inpath = ftp->path;
-
- lstArg = NULL;
- if((data->set.ftp_filemethod == FTPFILE_NOCWD) &&
- inpath && inpath[0] && strchr(inpath, '/')) {
- size_t n = strlen(inpath);
-
- /* Check if path does not end with /, as then we cut off the file part */
- if(inpath[n - 1] != '/') {
- /* chop off the file part if format is dir/dir/file */
- slashPos = strrchr(inpath, '/');
- n = slashPos - inpath;
- }
- result = Curl_urldecode(data, inpath, n, &lstArg, NULL, TRUE);
+ char *lstArg = NULL;
+ char *cmd;
+
+ if((data->set.ftp_filemethod == FTPFILE_NOCWD) && ftp->path) {
+ /* url-decode before evaluation: e.g. paths starting/ending with %2f */
+ const char *slashPos = NULL;
+ char *rawPath = NULL;
+ result = Curl_urldecode(data, ftp->path, 0, &rawPath, NULL, TRUE);
if(result)
return result;
+
+ slashPos = strrchr(rawPath, '/');
+ if(slashPos) {
+ /* chop off the file part if format is dir/file otherwise remove
+ the trailing slash for dir/dir/ except for absolute path / */
+ size_t n = slashPos - rawPath;
+ if(n == 0)
+ ++n;
+
+ lstArg = rawPath;
+ lstArg[n] = '\0';
+ }
+ else
+ free(rawPath);
}
cmd = aprintf("%s%s%s",
@@ -1469,15 +1478,12 @@ static CURLcode ftp_state_list(struct connectdata *conn)
(data->set.ftp_list_only?"NLST":"LIST"),
lstArg? " ": "",
lstArg? lstArg: "");
+ free(lstArg);
- if(!cmd) {
- free(lstArg);
+ if(!cmd)
return CURLE_OUT_OF_MEMORY;
- }
result = Curl_pp_sendf(&conn->proto.ftpc.pp, "%s", cmd);
-
- free(lstArg);
free(cmd);
if(result)
@@ -2242,9 +2248,25 @@ static CURLcode ftp_state_size_resp(struct connectdata *conn,
char *buf = data->state.buffer;
/* get the size from the ascii string: */
- if(ftpcode == 213)
+ if(ftpcode == 213) {
+ /* To allow servers to prepend "rubbish" in the response string, we scan
+ for all the digits at the end of the response and parse only those as a
+ number. */
+ char *start = &buf[4];
+ char *fdigit = strchr(start, '\r');
+ if(fdigit) {
+ do
+ fdigit--;
+ while(ISDIGIT(*fdigit) && (fdigit > start));
+ if(!ISDIGIT(*fdigit))
+ fdigit++;
+ }
+ else
+ fdigit = start;
/* ignores parsing errors, which will make the size remain unknown */
- (void)curlx_strtoofft(buf + 4, NULL, 0, &filesize);
+ (void)curlx_strtoofft(fdigit, NULL, 0, &filesize);
+
+ }
if(instate == FTP_SIZE) {
#ifdef CURL_FTP_HTTPSTYLE_HEAD
@@ -3115,7 +3137,8 @@ static CURLcode ftp_done(struct connectdata *conn, CURLcode status,
ssize_t nread;
int ftpcode;
CURLcode result = CURLE_OK;
- char *path = NULL;
+ char *rawPath = NULL;
+ size_t pathLen = 0;
if(!ftp)
return CURLE_OK;
@@ -3153,9 +3176,6 @@ static CURLcode ftp_done(struct connectdata *conn, CURLcode status,
break;
}
- /* now store a copy of the directory we are in */
- free(ftpc->prevpath);
-
if(data->state.wildcardmatch) {
if(data->set.chunk_end && ftpc->file) {
Curl_set_in_callback(data, true);
@@ -3166,41 +3186,41 @@ static CURLcode ftp_done(struct connectdata *conn, CURLcode status,
}
if(!result)
- /* get the "raw" path */
- result = Curl_urldecode(data, ftp->path, 0, &path, NULL, TRUE);
+ /* get the url-decoded "raw" path */
+ result = Curl_urldecode(data, ftp->path, 0, &rawPath, &pathLen, TRUE);
if(result) {
/* We can limp along anyway (and should try to since we may already be in
* the error path) */
ftpc->ctl_valid = FALSE; /* mark control connection as bad */
connclose(conn, "FTP: out of memory!"); /* mark for connection closure */
+ free(ftpc->prevpath);
ftpc->prevpath = NULL; /* no path remembering */
}
- else {
- size_t flen = ftpc->file?strlen(ftpc->file):0; /* file is "raw" already */
- size_t dlen = strlen(path)-flen;
- if(!ftpc->cwdfail) {
- ftpc->prevmethod = data->set.ftp_filemethod;
- if(dlen && (data->set.ftp_filemethod != FTPFILE_NOCWD)) {
- ftpc->prevpath = path;
- if(flen)
- /* if 'path' is not the whole string */
- ftpc->prevpath[dlen] = 0; /* terminate */
+ else { /* remember working directory for connection reuse */
+ if((data->set.ftp_filemethod == FTPFILE_NOCWD) && (rawPath[0] == '/'))
+ free(rawPath); /* full path => no CWDs happened => keep ftpc->prevpath */
+ else {
+ free(ftpc->prevpath);
+
+ if(!ftpc->cwdfail) {
+ if(data->set.ftp_filemethod == FTPFILE_NOCWD)
+ pathLen = 0; /* relative path => working directory is FTP home */
+ else
+ pathLen -= ftpc->file?strlen(ftpc->file):0; /* file is url-decoded */
+
+ rawPath[pathLen] = '\0';
+ ftpc->prevpath = rawPath;
}
else {
- free(path);
- /* we never changed dir */
- ftpc->prevpath = strdup("");
- if(!ftpc->prevpath)
- return CURLE_OUT_OF_MEMORY;
+ free(rawPath);
+ ftpc->prevpath = NULL; /* no path */
}
- if(ftpc->prevpath)
- infof(data, "Remembering we are in dir \"%s\"\n", ftpc->prevpath);
- }
- else {
- ftpc->prevpath = NULL; /* no path */
- free(path);
}
+
+ if(ftpc->prevpath)
+ infof(data, "Remembering we are in dir \"%s\"\n", ftpc->prevpath);
}
+
/* free the dir tree and file parts */
freedirs(ftpc);
@@ -3513,14 +3533,13 @@ static CURLcode ftp_do_more(struct connectdata *conn, int *completep)
/* if we got an error or if we don't wait for a data connection return
immediately */
- if(result || (ftpc->wait_data_conn != TRUE))
+ if(result || !ftpc->wait_data_conn)
return result;
- if(ftpc->wait_data_conn)
- /* if we reach the end of the FTP state machine here, *complete will be
- TRUE but so is ftpc->wait_data_conn, which says we need to wait for
- the data connection and therefore we're not actually complete */
- *completep = 0;
+ /* if we reach the end of the FTP state machine here, *complete will be
+ TRUE but so is ftpc->wait_data_conn, which says we need to wait for the
+ data connection and therefore we're not actually complete */
+ *completep = 0;
}
if(ftp->transfer <= FTPTRANSFER_INFO) {
@@ -3554,13 +3573,8 @@ static CURLcode ftp_do_more(struct connectdata *conn, int *completep)
return result;
result = ftp_multi_statemach(conn, &complete);
- if(ftpc->wait_data_conn)
- /* if we reach the end of the FTP state machine here, *complete will be
- TRUE but so is ftpc->wait_data_conn, which says we need to wait for
- the data connection and therefore we're not actually complete */
- *completep = 0;
- else
- *completep = (int)complete;
+ /* ftpc->wait_data_conn is always false here */
+ *completep = (int)complete;
}
else {
/* download */
@@ -3600,10 +3614,8 @@ static CURLcode ftp_do_more(struct connectdata *conn, int *completep)
return result;
}
- if(!result && (ftp->transfer != FTPTRANSFER_BODY))
- /* no data to transfer. FIX: it feels like a kludge to have this here
- too! */
- Curl_setup_transfer(data, -1, -1, FALSE, -1);
+ /* no data to transfer */
+ Curl_setup_transfer(data, -1, -1, FALSE, -1);
if(!ftpc->wait_data_conn) {
/* no waiting for the data connection so this is now complete */
@@ -4080,186 +4092,142 @@ CURLcode ftp_parse_url_path(struct connectdata *conn)
/* the ftp struct is already inited in ftp_connect() */
struct FTP *ftp = data->req.protop;
struct ftp_conn *ftpc = &conn->proto.ftpc;
- const char *slash_pos; /* position of the first '/' char in curpos */
- const char *path_to_use = ftp->path;
- const char *cur_pos;
- const char *filename = NULL;
-
- cur_pos = path_to_use; /* current position in path. point at the begin of
- next path component */
+ const char *slashPos = NULL;
+ const char *fileName = NULL;
+ CURLcode result = CURLE_OK;
+ char *rawPath = NULL; /* url-decoded "raw" path */
+ size_t pathLen = 0;
ftpc->ctl_valid = FALSE;
ftpc->cwdfail = FALSE;
- switch(data->set.ftp_filemethod) {
- case FTPFILE_NOCWD:
- /* fastest, but less standard-compliant */
-
- /*
- The best time to check whether the path is a file or directory is right
- here. so:
+ /* url-decode ftp path before further evaluation */
+ result = Curl_urldecode(data, ftp->path, 0, &rawPath, &pathLen, TRUE);
+ if(result)
+ return result;
- the first condition in the if() right here, is there just in case
- someone decides to set path to NULL one day
- */
- if(path_to_use[0] &&
- (path_to_use[strlen(path_to_use) - 1] != '/') )
- filename = path_to_use; /* this is a full file path */
- /*
- else {
- ftpc->file is not used anywhere other than for operations on a file.
- In other words, never for directory operations.
- So we can safely leave filename as NULL here and use it as a
- argument in dir/file decisions.
- }
- */
- break;
+ switch(data->set.ftp_filemethod) {
+ case FTPFILE_NOCWD: /* fastest, but less standard-compliant */
- case FTPFILE_SINGLECWD:
- /* get the last slash */
- if(!path_to_use[0]) {
- /* no dir, no file */
- ftpc->dirdepth = 0;
+ if((pathLen > 0) && (rawPath[pathLen - 1] != '/'))
+ fileName = rawPath; /* this is a full file path */
+ /*
+ else: ftpc->file is not used anywhere other than for operations on
+ a file. In other words, never for directory operations.
+ So we can safely leave filename as NULL here and use it as a
+ argument in dir/file decisions.
+ */
break;
- }
- slash_pos = strrchr(cur_pos, '/');
- if(slash_pos || !*cur_pos) {
- size_t dirlen = slash_pos-cur_pos;
- CURLcode result;
- ftpc->dirs = calloc(1, sizeof(ftpc->dirs[0]));
- if(!ftpc->dirs)
- return CURLE_OUT_OF_MEMORY;
+ case FTPFILE_SINGLECWD:
+ slashPos = strrchr(rawPath, '/');
+ if(slashPos) {
+ /* get path before last slash, except for / */
+ size_t dirlen = slashPos - rawPath;
+ if(dirlen == 0)
+ dirlen++;
+
+ ftpc->dirs = calloc(1, sizeof(ftpc->dirs[0]));
+ if(!ftpc->dirs) {
+ free(rawPath);
+ return CURLE_OUT_OF_MEMORY;
+ }
- if(!dirlen)
- dirlen++;
+ ftpc->dirs[0] = calloc(1, dirlen + 1);
+ if(!ftpc->dirs[0]) {
+ free(rawPath);
+ return CURLE_OUT_OF_MEMORY;
+ }
- result = Curl_urldecode(conn->data, slash_pos ? cur_pos : "/",
- slash_pos ? dirlen : 1,
- &ftpc->dirs[0], NULL,
- TRUE);
- if(result) {
- freedirs(ftpc);
- return result;
+ strncpy(ftpc->dirs[0], rawPath, dirlen);
+ ftpc->dirdepth = 1; /* we consider it to be a single dir */
+ fileName = slashPos + 1; /* rest is file name */
}
- ftpc->dirdepth = 1; /* we consider it to be a single dir */
- filename = slash_pos ? slash_pos + 1 : cur_pos; /* rest is file name */
- }
- else
- filename = cur_pos; /* this is a file name only */
- break;
+ else
+ fileName = rawPath; /* file name only (or empty) */
+ break;
- default: /* allow pretty much anything */
- case FTPFILE_MULTICWD:
- ftpc->dirdepth = 0;
- ftpc->diralloc = 5; /* default dir depth to allocate */
- ftpc->dirs = calloc(ftpc->diralloc, sizeof(ftpc->dirs[0]));
- if(!ftpc->dirs)
- return CURLE_OUT_OF_MEMORY;
+ default: /* allow pretty much anything */
+ case FTPFILE_MULTICWD: {
+ /* current position: begin of next path component */
+ const char *curPos = rawPath;
+
+ int dirAlloc = 0; /* number of entries allocated for the 'dirs' array */
+ const char *str = rawPath;
+ for(; *str != 0; ++str)
+ if (*str == '/')
+ ++dirAlloc;
+
+ if(dirAlloc > 0) {
+ ftpc->dirs = calloc(dirAlloc, sizeof(ftpc->dirs[0]));
+ if(!ftpc->dirs) {
+ free(rawPath);
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ /* parse the URL path into separate path components */
+ while((slashPos = strchr(curPos, '/')) != NULL) {
+ size_t compLen = slashPos - curPos;
+
+ /* path starts with a slash: add that as a directory */
+ if((compLen == 0) && (ftpc->dirdepth == 0))
+ ++compLen;
- /* we have a special case for listing the root dir only */
- if(!strcmp(path_to_use, "/")) {
- cur_pos++; /* make it point to the zero byte */
- ftpc->dirs[0] = strdup("/");
- ftpc->dirdepth++;
- }
- else {
- /* parse the URL path into separate path components */
- while((slash_pos = strchr(cur_pos, '/')) != NULL) {
- /* 1 or 0 pointer offset to indicate absolute directory */
- ssize_t absolute_dir = ((cur_pos - ftp->path > 0) &&
- (ftpc->dirdepth == 0))?1:0;
-
- /* seek out the next path component */
- if(slash_pos-cur_pos) {
/* we skip empty path components, like "x//y" since the FTP command
CWD requires a parameter and a non-existent parameter a) doesn't
work on many servers and b) has no effect on the others. */
- size_t len = slash_pos - cur_pos + absolute_dir;
- CURLcode result =
- Curl_urldecode(conn->data, cur_pos - absolute_dir, len,
- &ftpc->dirs[ftpc->dirdepth], NULL,
- TRUE);
- if(result) {
- freedirs(ftpc);
- return result;
- }
- }
- else {
- cur_pos = slash_pos + 1; /* jump to the rest of the string */
- if(!ftpc->dirdepth) {
- /* path starts with a slash, add that as a directory */
- ftpc->dirs[ftpc->dirdepth] = strdup("/");
- if(!ftpc->dirs[ftpc->dirdepth++]) { /* run out of memory ... */
- failf(data, "no memory");
- freedirs(ftpc);
+ if(compLen > 0) {
+ char *comp = calloc(1, compLen + 1);
+ if(!comp) {
+ free(rawPath);
return CURLE_OUT_OF_MEMORY;
}
+ strncpy(comp, curPos, compLen);
+ ftpc->dirs[ftpc->dirdepth++] = comp;
}
- continue;
- }
-
- cur_pos = slash_pos + 1; /* jump to the rest of the string */
- if(++ftpc->dirdepth >= ftpc->diralloc) {
- /* enlarge array */
- char **bigger;
- ftpc->diralloc *= 2; /* double the size each time */
- bigger = realloc(ftpc->dirs, ftpc->diralloc * sizeof(ftpc->dirs[0]));
- if(!bigger) {
- freedirs(ftpc);
- return CURLE_OUT_OF_MEMORY;
- }
- ftpc->dirs = bigger;
+ curPos = slashPos + 1;
}
}
+ DEBUGASSERT(ftpc->dirdepth <= dirAlloc);
+ fileName = curPos; /* the rest is the file name (or empty) */
}
- filename = cur_pos; /* the rest is the file name */
break;
} /* switch */
- if(filename && *filename) {
- CURLcode result =
- Curl_urldecode(conn->data, filename, 0, &ftpc->file, NULL, TRUE);
-
- if(result) {
- freedirs(ftpc);
- return result;
- }
- }
+ if(fileName && *fileName)
+ ftpc->file = strdup(fileName);
else
- ftpc->file = NULL; /* instead of point to a zero byte, we make it a NULL
- pointer */
+ ftpc->file = NULL; /* instead of point to a zero byte,
+ we make it a NULL pointer */
if(data->set.upload && !ftpc->file && (ftp->transfer == FTPTRANSFER_BODY)) {
/* We need a file name when uploading. Return error! */
failf(data, "Uploading to a URL without a file name!");
+ free(rawPath);
return CURLE_URL_MALFORMAT;
}
ftpc->cwddone = FALSE; /* default to not done */
- if(ftpc->prevpath) {
- /* prevpath is "raw" so we convert the input path before we compare the
- strings */
- size_t dlen;
- char *path;
- CURLcode result =
- Curl_urldecode(conn->data, ftp->path, 0, &path, &dlen, TRUE);
- if(result) {
- freedirs(ftpc);
- return result;
- }
+ if((data->set.ftp_filemethod == FTPFILE_NOCWD) && (rawPath[0] == '/'))
+ ftpc->cwddone = TRUE; /* skip CWD for absolute paths */
+ else { /* newly created FTP connections are already in entry path */
+ const char *oldPath = conn->bits.reuse ? ftpc->prevpath : "";
+ if(oldPath) {
+ size_t n = pathLen;
+ if(data->set.ftp_filemethod == FTPFILE_NOCWD)
+ n = 0; /* CWD to entry for relative paths */
+ else
+ n -= ftpc->file?strlen(ftpc->file):0;
- dlen -= ftpc->file?strlen(ftpc->file):0;
- if((dlen == strlen(ftpc->prevpath)) &&
- !strncmp(path, ftpc->prevpath, dlen) &&
- (ftpc->prevmethod == data->set.ftp_filemethod)) {
- infof(data, "Request has same path as previous transfer\n");
- ftpc->cwddone = TRUE;
+ if((strlen(oldPath) == n) && !strncmp(rawPath, oldPath, n)) {
+ infof(data, "Request has same path as previous transfer\n");
+ ftpc->cwddone = TRUE;
+ }
}
- free(path);
}
+ free(rawPath);
return CURLE_OK;
}
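The rewritten block above drops the per-component Curl_urldecode() calls: the whole path is decoded once into rawPath, then split on '/' into calloc()'d entries of ftpc->dirs, with a leading slash kept as its own "/" component and empty components (as in "x//y") skipped. The standalone sketch below shows that splitting idea only; split_path(), dirs and filename are illustrative names for this example, not libcurl internals.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split an already url-decoded path into CWD components the way the new
   ftp.c code does: a leading slash becomes a "/" component, empty
   components are skipped, and whatever follows the last slash is the
   file name. */
static size_t split_path(const char *path, char **dirs, size_t maxdirs,
                         const char **filename)
{
  const char *cur = path;
  const char *slash;
  size_t depth = 0;

  while((slash = strchr(cur, '/')) != NULL && depth < maxdirs) {
    size_t len = (size_t)(slash - cur);
    if(len == 0 && depth == 0)
      len = 1;                 /* keep the leading "/" as its own component */
    if(len > 0) {
      char *comp = calloc(1, len + 1);
      if(!comp)
        break;                 /* out of memory: stop, caller frees the rest */
      memcpy(comp, cur, len);
      dirs[depth++] = comp;
    }
    cur = slash + 1;           /* continue after the slash */
  }
  *filename = cur;             /* the rest (possibly empty) is the file name */
  return depth;
}

int main(void)
{
  char *dirs[16];
  const char *file;
  size_t i, n = split_path("/tmp//sub/readme.txt", dirs, 16, &file);

  for(i = 0; i < n; i++) {
    printf("CWD %s\n", dirs[i]);
    free(dirs[i]);
  }
  printf("RETR %s\n", file);   /* -> CWD /, CWD tmp, CWD sub, RETR readme.txt */
  return 0;
}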
diff --git a/libs/libcurl/src/ftp.h b/libs/libcurl/src/ftp.h
index 828d69a21d..2c88d568c1 100644
--- a/libs/libcurl/src/ftp.h
+++ b/libs/libcurl/src/ftp.h
@@ -121,8 +121,7 @@ struct ftp_conn {
char *entrypath; /* the PWD reply when we logged on */
char **dirs; /* realloc()ed array for path components */
int dirdepth; /* number of entries used in the 'dirs' array */
- int diralloc; /* number of entries allocated for the 'dirs' array */
- char *file; /* decoded file */
+ char *file; /* url-decoded file name (or path) */
bool dont_check; /* Set to TRUE to prevent the final (post-transfer)
file size and 226/250 status check. It should still
read the line, just ignore the result. */
@@ -135,8 +134,7 @@ struct ftp_conn {
bool cwdfail; /* set TRUE if a CWD command fails, as then we must prevent
caching the current directory */
bool wait_data_conn; /* this is set TRUE if data connection is waited */
- char *prevpath; /* conn->path from the previous transfer */
- curl_ftpfile prevmethod; /* ftp method in previous transfer */
+ char *prevpath; /* url-decoded conn->path from the previous transfer */
char transfertype; /* set by ftp_transfertype for use by Curl_client_write()
and others (A/I or zero) */
int count1; /* general purpose counter for the state machine */
diff --git a/libs/libcurl/src/ftplistparser.c b/libs/libcurl/src/ftplistparser.c
index c4eb43732d..f399a4c272 100644
--- a/libs/libcurl/src/ftplistparser.c
+++ b/libs/libcurl/src/ftplistparser.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
diff --git a/libs/libcurl/src/hostcheck.c b/libs/libcurl/src/hostcheck.c
index 115d24b2e2..9e0db05fac 100644
--- a/libs/libcurl/src/hostcheck.c
+++ b/libs/libcurl/src/hostcheck.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
diff --git a/libs/libcurl/src/hostip.c b/libs/libcurl/src/hostip.c
index bd532a891e..d4e8f9366a 100644
--- a/libs/libcurl/src/hostip.c
+++ b/libs/libcurl/src/hostip.c
@@ -749,7 +749,7 @@ clean_up:
conn->created) / 1000;
/* the alarm period is counted in even number of seconds */
- unsigned long alarm_set = prev_alarm - elapsed_secs;
+ unsigned long alarm_set = (unsigned long)(prev_alarm - elapsed_secs);
if(!alarm_set ||
((alarm_set >= 0x80000000) && (prev_alarm < 0x80000000)) ) {
diff --git a/libs/libcurl/src/http.c b/libs/libcurl/src/http.c
index 28d1fa607d..4631a7f36b 100644
--- a/libs/libcurl/src/http.c
+++ b/libs/libcurl/src/http.c
@@ -450,9 +450,6 @@ static CURLcode http_perhapsrewind(struct connectdata *conn)
/* figure out how much data we are expected to send */
switch(data->set.httpreq) {
case HTTPREQ_POST:
- if(data->state.infilesize != -1)
- expectsend = data->state.infilesize;
- break;
case HTTPREQ_PUT:
if(data->state.infilesize != -1)
expectsend = data->state.infilesize;
@@ -2679,7 +2676,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
struct Cookie *co = NULL; /* no cookies from start */
int count = 0;
- if(data->cookies) {
+ if(data->cookies && data->state.cookie_engine) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
co = Curl_cookie_getlist(data->cookies,
conn->allocptr.cookiehost?
@@ -3044,8 +3041,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
failf(data, "Failed sending HTTP request");
else
/* HTTP GET/HEAD download: */
- Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- http->postdata?FIRSTSOCKET:-1);
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
}
if(result)
return result;
@@ -4017,7 +4013,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
data->state.resume_from = 0; /* get everything */
}
#if !defined(CURL_DISABLE_COOKIES)
- else if(data->cookies &&
+ else if(data->cookies && data->state.cookie_engine &&
checkprefix("Set-Cookie:", k->p)) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
CURL_LOCK_ACCESS_SINGLE);
@@ -4058,7 +4054,7 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
if(result)
return result;
}
- #ifdef USE_SPNEGO
+#ifdef USE_SPNEGO
else if(checkprefix("Persistent-Auth", k->p)) {
struct negotiatedata *negdata = &conn->negotiate;
struct auth *authp = &data->state.authhost;
@@ -4066,14 +4062,15 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
char *persistentauth = Curl_copy_header_value(k->p);
if(!persistentauth)
return CURLE_OUT_OF_MEMORY;
- negdata->noauthpersist = checkprefix("false", persistentauth);
+ negdata->noauthpersist = checkprefix("false", persistentauth)?
+ TRUE:FALSE;
negdata->havenoauthpersist = TRUE;
infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
negdata->noauthpersist, persistentauth);
free(persistentauth);
}
}
- #endif
+#endif
else if((k->httpcode >= 300 && k->httpcode < 400) &&
checkprefix("Location:", k->p) &&
!data->req.location) {
diff --git a/libs/libcurl/src/http.h b/libs/libcurl/src/http.h
index f0ddec7590..a3a2757025 100644
--- a/libs/libcurl/src/http.h
+++ b/libs/libcurl/src/http.h
@@ -83,11 +83,6 @@ CURLcode Curl_http(struct connectdata *conn, bool *done);
CURLcode Curl_http_done(struct connectdata *, CURLcode, bool premature);
CURLcode Curl_http_connect(struct connectdata *conn, bool *done);
-/* The following functions are defined in http_chunks.c */
-void Curl_httpchunk_init(struct connectdata *conn);
-CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap,
- ssize_t length, ssize_t *wrote);
-
/* These functions are in http.c */
CURLcode Curl_http_input_auth(struct connectdata *conn, bool proxy,
const char *auth);
diff --git a/libs/libcurl/src/http2.c b/libs/libcurl/src/http2.c
index 31d2d698ad..6315fc4014 100644
--- a/libs/libcurl/src/http2.c
+++ b/libs/libcurl/src/http2.c
@@ -496,16 +496,14 @@ static struct Curl_easy *duphandle(struct Curl_easy *data)
/* setup the request struct */
struct HTTP *http = calloc(1, sizeof(struct HTTP));
if(!http) {
- (void)Curl_close(second);
- second = NULL;
+ (void)Curl_close(&second);
}
else {
second->req.protop = http;
http->header_recvbuf = Curl_add_buffer_init();
if(!http->header_recvbuf) {
free(http);
- (void)Curl_close(second);
- second = NULL;
+ (void)Curl_close(&second);
}
else {
Curl_http2_setup_req(second);
@@ -547,7 +545,7 @@ static int push_promise(struct Curl_easy *data,
stream = data->req.protop;
if(!stream) {
failf(data, "Internal NULL stream!\n");
- (void)Curl_close(newhandle);
+ (void)Curl_close(&newhandle);
rv = 1;
goto fail;
}
@@ -569,7 +567,7 @@ static int push_promise(struct Curl_easy *data,
/* denied, kill off the new handle again */
http2_stream_free(newhandle->req.protop);
newhandle->req.protop = NULL;
- (void)Curl_close(newhandle);
+ (void)Curl_close(&newhandle);
goto fail;
}
@@ -585,7 +583,7 @@ static int push_promise(struct Curl_easy *data,
infof(data, "failed to add handle to multi\n");
http2_stream_free(newhandle->req.protop);
newhandle->req.protop = NULL;
- Curl_close(newhandle);
+ Curl_close(&newhandle);
rv = 1;
goto fail;
}
@@ -848,6 +846,7 @@ static int on_stream_close(nghttp2_session *session, int32_t stream_id,
stream->closed = TRUE;
httpc = &conn->proto.httpc;
drain_this(data_s, httpc);
+ Curl_expire(data_s, 0, EXPIRE_RUN_NOW);
httpc->error_code = error_code;
/* remove the entry from the hash as the stream is now gone */
@@ -967,7 +966,9 @@ static int on_header(nghttp2_session *session, const nghttp2_frame *frame,
if(!check)
/* no memory */
return NGHTTP2_ERR_CALLBACK_FAILURE;
- if(!Curl_strcasecompare(check, (const char *)value)) {
+ if(!Curl_strcasecompare(check, (const char *)value) &&
+ ((conn->remote_port != conn->given->defport) ||
+ !Curl_strcasecompare(conn->host.name, (const char *)value))) {
/* This push is not for the same authority that was asked for in
* the URL. RFC 7540 section 8.2 says: "A client MUST treat a
* PUSH_PROMISE for which the server is not authoritative as a stream
@@ -1157,7 +1158,7 @@ static void populate_settings(struct connectdata *conn,
nghttp2_settings_entry *iv = httpc->local_settings;
iv[0].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
- iv[0].value = 100;
+ iv[0].value = (uint32_t)Curl_multi_max_concurrent_streams(conn->data->multi);
iv[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
iv[1].value = HTTP2_HUGE_WINDOW_SIZE;
@@ -1535,6 +1536,7 @@ static int h2_session_send(struct Curl_easy *data,
H2BUGF(infof(data, "Queuing PRIORITY on stream %u (easy %p)\n",
stream->stream_id, data));
+ DEBUGASSERT(stream->stream_id != -1);
rv = nghttp2_submit_priority(h2, NGHTTP2_FLAG_NONE, stream->stream_id,
&pri_spec);
if(rv)
@@ -1659,6 +1661,9 @@ static ssize_t http2_recv(struct connectdata *conn, int sockindex,
socket is not read. But it seems that usually streams are
notified with their drain property, and the socket is read again
quickly. */
+ if(stream->closed)
+ /* closed overrides paused */
+ return 0;
H2BUGF(infof(data, "stream %x is paused, pause id: %x\n",
stream->stream_id, httpc->pause_stream_id));
*err = CURLE_AGAIN;
@@ -1773,8 +1778,9 @@ static ssize_t http2_recv(struct connectdata *conn, int sockindex,
field list. */
#define AUTHORITY_DST_IDX 3
+/* USHRT_MAX is 65535 == 0xffff */
#define HEADER_OVERFLOW(x) \
- (x.namelen > (uint16_t)-1 || x.valuelen > (uint16_t)-1 - x.namelen)
+ (x.namelen > 0xffff || x.valuelen > 0xffff - x.namelen)
/*
* Check header memory for the token "trailers".
@@ -2024,8 +2030,10 @@ static ssize_t http2_send(struct connectdata *conn, int sockindex,
nva[i].namelen = strlen((char *)nva[i].name);
}
else {
- nva[i].name = (unsigned char *)hdbuf;
nva[i].namelen = (size_t)(end - hdbuf);
+ /* Lower case the header name for HTTP/2 */
+ Curl_strntolower((char *)hdbuf, hdbuf, nva[i].namelen);
+ nva[i].name = (unsigned char *)hdbuf;
}
hdbuf = end + 1;
while(*hdbuf == ' ' || *hdbuf == '\t')
@@ -2135,17 +2143,14 @@ static ssize_t http2_send(struct connectdata *conn, int sockindex,
return -1;
}
- if(stream->stream_id != -1) {
- /* If whole HEADERS frame was sent off to the underlying socket,
- the nghttp2 library calls data_source_read_callback. But only
- it found that no data available, so it deferred the DATA
- transmission. Which means that nghttp2_session_want_write()
- returns 0 on http2_perform_getsock(), which results that no
- writable socket check is performed. To workaround this, we
- issue nghttp2_session_resume_data() here to bring back DATA
- transmission from deferred state. */
- nghttp2_session_resume_data(h2, stream->stream_id);
- }
+ /* If the whole HEADERS frame was sent off to the underlying socket, the
+ nghttp2 library calls data_source_read_callback. But if it found no data
+ available, it deferred the DATA transmission, which means that
+ nghttp2_session_want_write() returns 0 in http2_perform_getsock() and no
+ writable socket check is performed. To work around this, we issue
+ nghttp2_session_resume_data() here to bring the DATA transmission back
+ from its deferred state. */
+ nghttp2_session_resume_data(h2, stream->stream_id);
return len;
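The Curl_close(&newhandle) call sites earlier in this file run when an application's push callback denies a server push. For context, this is a minimal application-side callback using the public HTTP/2 push API; the "/assets/" filter and the on_push name are made-up example choices, not anything the library mandates.

#include <string.h>
#include <curl/curl.h>

/* Deny HTTP/2 server pushes unless the promised :path looks interesting.
   Returning CURL_PUSH_DENY makes libcurl run the "denied, kill off the new
   handle again" branch shown in the diff above. */
static int on_push(CURL *parent, CURL *pushed, size_t num_headers,
                   struct curl_pushheaders *headers, void *userp)
{
  char *path = curl_pushheader_byname(headers, ":path");
  (void)parent;
  (void)num_headers;
  (void)userp;

  if(path && !strncmp(path, "/assets/", 8)) {
    /* accept: tag the pushed transfer so the application recognizes it */
    curl_easy_setopt(pushed, CURLOPT_PRIVATE, (void *)"pushed");
    return CURL_PUSH_OK;
  }
  return CURL_PUSH_DENY;
}

int main(void)
{
  CURLM *multi;
  curl_global_init(CURL_GLOBAL_DEFAULT);
  multi = curl_multi_init();
  curl_multi_setopt(multi, CURLMOPT_PUSHFUNCTION, on_push);
  curl_multi_setopt(multi, CURLMOPT_PUSHDATA, NULL);
  /* ... add HTTP/2 easy handles and drive the multi handle as usual ... */
  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}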
diff --git a/libs/libcurl/src/http_chunks.c b/libs/libcurl/src/http_chunks.c
index 18dfcb2824..b6ffa41854 100644
--- a/libs/libcurl/src/http_chunks.c
+++ b/libs/libcurl/src/http_chunks.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -109,7 +109,8 @@ void Curl_httpchunk_init(struct connectdata *conn)
CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
char *datap,
ssize_t datalen,
- ssize_t *wrotep)
+ ssize_t *wrotep,
+ CURLcode *extrap)
{
CURLcode result = CURLE_OK;
struct Curl_easy *data = conn->data;
@@ -125,8 +126,10 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
chunk read process, to properly calculate the content length*/
if(data->set.http_te_skip && !k->ignorebody) {
result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, datalen);
- if(result)
- return CHUNKE_WRITE_ERROR;
+ if(result) {
+ *extrap = result;
+ return CHUNKE_PASSTHRU_ERROR;
+ }
}
while(length) {
@@ -197,8 +200,10 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
else
result = Curl_client_write(conn, CLIENTWRITE_BODY, datap, piece);
- if(result)
- return CHUNKE_WRITE_ERROR;
+ if(result) {
+ *extrap = result;
+ return CHUNKE_PASSTHRU_ERROR;
+ }
}
*wrote += piece;
@@ -244,8 +249,10 @@ CHUNKcode Curl_httpchunk_read(struct connectdata *conn,
if(!data->set.http_te_skip) {
result = Curl_client_write(conn, CLIENTWRITE_HEADER,
conn->trailer, conn->trlPos);
- if(result)
- return CHUNKE_WRITE_ERROR;
+ if(result) {
+ *extrap = result;
+ return CHUNKE_PASSTHRU_ERROR;
+ }
}
conn->trlPos = 0;
ch->state = CHUNK_TRAILER_CR;
@@ -339,8 +346,9 @@ const char *Curl_chunked_strerror(CHUNKcode code)
return "Illegal or missing hexadecimal sequence";
case CHUNKE_BAD_CHUNK:
return "Malformed encoding found";
- case CHUNKE_WRITE_ERROR:
- return "Write error";
+ case CHUNKE_PASSTHRU_ERROR:
+ DEBUGASSERT(0); /* never used */
+ return "";
case CHUNKE_BAD_ENCODING:
return "Bad content-encoding found";
case CHUNKE_OUT_OF_MEMORY:
diff --git a/libs/libcurl/src/http_chunks.h b/libs/libcurl/src/http_chunks.h
index b969c5590a..8f4a33c8e4 100644
--- a/libs/libcurl/src/http_chunks.h
+++ b/libs/libcurl/src/http_chunks.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -21,6 +21,9 @@
* KIND, either express or implied.
*
***************************************************************************/
+
+struct connectdata;
+
/*
* The longest possible hexadecimal number we support in a chunked transfer.
* Weird enough, RFC2616 doesn't set a maximum size! Since we use strtoul()
@@ -71,9 +74,9 @@ typedef enum {
CHUNKE_TOO_LONG_HEX = 1,
CHUNKE_ILLEGAL_HEX,
CHUNKE_BAD_CHUNK,
- CHUNKE_WRITE_ERROR,
CHUNKE_BAD_ENCODING,
CHUNKE_OUT_OF_MEMORY,
+ CHUNKE_PASSTHRU_ERROR, /* Curl_httpchunk_read() returns a CURLcode to use */
CHUNKE_LAST
} CHUNKcode;
@@ -87,4 +90,10 @@ struct Curl_chunker {
size_t dataleft; /* untouched data amount at the end of the last buffer */
};
+/* The following functions are defined in http_chunks.c */
+void Curl_httpchunk_init(struct connectdata *conn);
+CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap,
+ ssize_t length, ssize_t *wrote,
+ CURLcode *passthru);
+
#endif /* HEADER_CURL_HTTP_CHUNKS_H */
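CHUNKE_PASSTHRU_ERROR changes the error contract: instead of mapping every failed client write to CHUNKE_WRITE_ERROR, Curl_httpchunk_read() now returns CHUNKE_PASSTHRU_ERROR and hands the real CURLcode back through the new out-parameter. The toy program below mirrors that two-level scheme with cut-down stand-in enums (not the real libcurl definitions); the transfer.c hunk further down shows the real caller.

#include <stdio.h>

/* Toy model of the pass-through added above: the chunk parser keeps
   returning CHUNKcode values, but a failed client write now returns
   CHUNKE_PASSTHRU_ERROR and puts the precise CURLcode into '*extrap'. */
typedef enum { CHUNKE_OK = 0,
               CHUNKE_BAD_CHUNK,
               CHUNKE_PASSTHRU_ERROR } CHUNKcode;
typedef enum { CURLE_OK = 0,
               CURLE_WRITE_ERROR = 23,
               CURLE_RECV_ERROR = 56 } CURLcode;

static CURLcode client_write(const char *buf, size_t len)
{
  (void)buf; (void)len;
  return CURLE_WRITE_ERROR;         /* pretend the write callback failed */
}

static CHUNKcode chunk_read(const char *buf, size_t len, CURLcode *extrap)
{
  CURLcode result = client_write(buf, len);
  if(result) {
    *extrap = result;               /* hand the exact error to the caller */
    return CHUNKE_PASSTHRU_ERROR;
  }
  return CHUNKE_OK;
}

int main(void)
{
  CURLcode extra = CURLE_OK;
  CHUNKcode res = chunk_read("5\r\nhello\r\n", 10, &extra);

  if(res == CHUNKE_PASSTHRU_ERROR)  /* same check transfer.c now does */
    printf("chunked read failed with CURLcode %d\n", (int)extra);
  else if(res != CHUNKE_OK)
    printf("malformed chunked encoding\n");
  return 0;
}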
diff --git a/libs/libcurl/src/http_proxy.c b/libs/libcurl/src/http_proxy.c
index ba67b861bd..f095455a51 100644
--- a/libs/libcurl/src/http_proxy.c
+++ b/libs/libcurl/src/http_proxy.c
@@ -327,7 +327,7 @@ static CURLcode CONNECT(struct connectdata *conn,
{ /* READING RESPONSE PHASE */
int error = SELECT_OK;
- while(s->keepon && !error) {
+ while(s->keepon) {
ssize_t gotbytes;
/* make sure we have space to read more data */
@@ -384,11 +384,12 @@ static CURLcode CONNECT(struct connectdata *conn,
/* chunked-encoded body, so we need to do the chunked dance
properly to know when the end of the body is reached */
CHUNKcode r;
+ CURLcode extra;
ssize_t tookcareof = 0;
/* now parse the chunked piece of data so that we can
properly tell when the stream ends */
- r = Curl_httpchunk_read(conn, s->ptr, 1, &tookcareof);
+ r = Curl_httpchunk_read(conn, s->ptr, 1, &tookcareof, &extra);
if(r == CHUNKE_STOP) {
/* we're done reading chunks! */
infof(data, "chunk reading DONE\n");
@@ -455,6 +456,7 @@ static CURLcode CONNECT(struct connectdata *conn,
}
else if(s->chunked_encoding) {
CHUNKcode r;
+ CURLcode extra;
infof(data, "Ignore chunked response-body\n");
@@ -472,7 +474,8 @@ static CURLcode CONNECT(struct connectdata *conn,
/* now parse the chunked piece of data so that we can
properly tell when the stream ends */
- r = Curl_httpchunk_read(conn, s->line_start + 1, 1, &gotbytes);
+ r = Curl_httpchunk_read(conn, s->line_start + 1, 1, &gotbytes,
+ &extra);
if(r == CHUNKE_STOP) {
/* we're done reading chunks! */
infof(data, "chunk reading DONE\n");
diff --git a/libs/libcurl/src/imap.c b/libs/libcurl/src/imap.c
index ff9b629470..66172bddcc 100644
--- a/libs/libcurl/src/imap.c
+++ b/libs/libcurl/src/imap.c
@@ -1306,6 +1306,7 @@ static CURLcode imap_statemach_act(struct connectdata *conn)
break;
case IMAP_LIST:
+ case IMAP_SEARCH:
result = imap_state_listsearch_resp(conn, imapcode, imapc->state);
break;
@@ -1329,10 +1330,6 @@ static CURLcode imap_statemach_act(struct connectdata *conn)
result = imap_state_append_final_resp(conn, imapcode, imapc->state);
break;
- case IMAP_SEARCH:
- result = imap_state_listsearch_resp(conn, imapcode, imapc->state);
- break;
-
case IMAP_LOGOUT:
/* fallthrough, just stop! */
default:
diff --git a/libs/libcurl/src/ldap.c b/libs/libcurl/src/ldap.c
index fd31faa3e7..af3d61c57e 100644
--- a/libs/libcurl/src/ldap.c
+++ b/libs/libcurl/src/ldap.c
@@ -119,6 +119,12 @@ static void _ldap_free_urldesc(LDAPURLDesc *ludp);
#define LDAP_TRACE(x) Curl_nop_stmt
#endif
+#if defined(USE_WIN32_LDAP) && defined(ldap_err2string)
+/* Use ansi error strings in UNICODE builds */
+#undef ldap_err2string
+#define ldap_err2string ldap_err2stringA
+#endif
+
static CURLcode Curl_ldap(struct connectdata *conn, bool *done);
@@ -838,10 +844,10 @@ static bool split_str(char *str, char ***out, size_t *count)
static int _ldap_url_parse2(const struct connectdata *conn, LDAPURLDesc *ludp)
{
int rc = LDAP_SUCCESS;
- char *path;
- char *query;
char *p;
- char *q;
+ char *path;
+ char *q = NULL;
+ char *query = NULL;
size_t i;
if(!conn->data ||
@@ -859,11 +865,13 @@ static int _ldap_url_parse2(const struct connectdata *conn, LDAPURLDesc *ludp)
if(!path)
return LDAP_NO_MEMORY;
- /* Duplicate the query */
- q = query = strdup(conn->data->state.up.query);
- if(!query) {
- free(path);
- return LDAP_NO_MEMORY;
+ /* Duplicate the query if present */
+ if(conn->data->state.up.query) {
+ q = query = strdup(conn->data->state.up.query);
+ if(!query) {
+ free(path);
+ return LDAP_NO_MEMORY;
+ }
}
/* Parse the DN (Distinguished Name) */
diff --git a/libs/libcurl/src/libcurl.plist b/libs/libcurl/src/libcurl.plist
index 9db8aa3107..55c2ed494d 100644
--- a/libs/libcurl/src/libcurl.plist
+++ b/libs/libcurl/src/libcurl.plist
@@ -15,7 +15,7 @@
<string>se.haxx.curl.libcurl</string>
<key>CFBundleVersion</key>
- <string>7.66.0</string>
+ <string>7.67.0</string>
<key>CFBundleName</key>
<string>libcurl</string>
@@ -27,9 +27,9 @@
<string>????</string>
<key>CFBundleShortVersionString</key>
- <string>libcurl 7.66.0</string>
+ <string>libcurl 7.67.0</string>
<key>CFBundleGetInfoString</key>
- <string>libcurl.plist 7.66.0</string>
+ <string>libcurl.plist 7.67.0</string>
</dict>
</plist>
diff --git a/libs/libcurl/src/mime.c b/libs/libcurl/src/mime.c
index 2135f72c25..081e51e508 100644
--- a/libs/libcurl/src/mime.c
+++ b/libs/libcurl/src/mime.c
@@ -1135,6 +1135,8 @@ CURLcode Curl_mime_duppart(curl_mimepart *dst, const curl_mimepart *src)
const curl_mimepart *s;
CURLcode res = CURLE_OK;
+ DEBUGASSERT(dst);
+
/* Duplicate content. */
switch(src->kind) {
case MIMEKIND_NONE:
@@ -1184,20 +1186,18 @@ CURLcode Curl_mime_duppart(curl_mimepart *dst, const curl_mimepart *src)
}
}
- /* Duplicate other fields. */
- if(dst != NULL)
+ if(!res) {
+ /* Duplicate other fields. */
dst->encoder = src->encoder;
- else
- res = CURLE_WRITE_ERROR;
- if(!res)
res = curl_mime_type(dst, src->mimetype);
+ }
if(!res)
res = curl_mime_name(dst, src->name);
if(!res)
res = curl_mime_filename(dst, src->filename);
/* If an error occurred, rollback. */
- if(res && dst)
+ if(res)
Curl_mime_cleanpart(dst);
return res;
@@ -1901,4 +1901,11 @@ CURLcode curl_mime_headers(curl_mimepart *part,
return CURLE_NOT_BUILT_IN;
}
+CURLcode Curl_mime_add_header(struct curl_slist **slp, const char *fmt, ...)
+{
+ (void)slp;
+ (void)fmt;
+ return CURLE_NOT_BUILT_IN;
+}
+
#endif /* if disabled */
diff --git a/libs/libcurl/src/mime.h b/libs/libcurl/src/mime.h
index 4c9a5fb71f..3241fdc1f7 100644
--- a/libs/libcurl/src/mime.h
+++ b/libs/libcurl/src/mime.h
@@ -127,7 +127,9 @@ struct curl_mimepart_s {
mime_encoder_state encstate; /* Data encoder state. */
};
-#if (!defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_MIME)) || \
+CURLcode Curl_mime_add_header(struct curl_slist **slp, const char *fmt, ...);
+
+#if (!defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_MIME)) || \
!defined(CURL_DISABLE_SMTP) || !defined(CURL_DISABLE_IMAP)
/* Prototypes. */
@@ -144,7 +146,6 @@ curl_off_t Curl_mime_size(curl_mimepart *part);
size_t Curl_mime_read(char *buffer, size_t size, size_t nitems,
void *instream);
CURLcode Curl_mime_rewind(curl_mimepart *part);
-CURLcode Curl_mime_add_header(struct curl_slist **slp, const char *fmt, ...);
const char *Curl_mime_contenttype(const char *filename);
#else
@@ -157,7 +158,6 @@ const char *Curl_mime_contenttype(const char *filename);
#define Curl_mime_size(x) (curl_off_t) -1
#define Curl_mime_read NULL
#define Curl_mime_rewind(x) ((void)x, CURLE_NOT_BUILT_IN)
-#define Curl_mime_add_header(x,y,...) CURLE_NOT_BUILT_IN
#endif
diff --git a/libs/libcurl/src/multi.c b/libs/libcurl/src/multi.c
index 2e91e4ff35..6dfe8842e7 100644
--- a/libs/libcurl/src/multi.c
+++ b/libs/libcurl/src/multi.c
@@ -363,7 +363,7 @@ struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */
Curl_llist_init(&multi->msglist, NULL);
Curl_llist_init(&multi->pending, NULL);
- multi->multiplexing = CURLPIPE_MULTIPLEX;
+ multi->multiplexing = TRUE;
/* -1 means it is not set by the user, use the default value */
multi->maxconnects = -1;
@@ -2772,6 +2772,16 @@ CURLMcode curl_multi_setopt(struct Curl_multi *multi,
break;
case CURLMOPT_PIPELINING_SERVER_BL:
break;
+ case CURLMOPT_MAX_CONCURRENT_STREAMS:
+ {
+ long streams = va_arg(param, long);
+ if(streams < 1)
+ streams = 100;
+ multi->max_concurrent_streams =
+ (streams > (long)INITIAL_MAX_CONCURRENT_STREAMS)?
+ (long)INITIAL_MAX_CONCURRENT_STREAMS : streams;
+ }
+ break;
default:
res = CURLM_UNKNOWN_OPTION;
break;
@@ -3210,3 +3220,9 @@ void Curl_multi_dump(struct Curl_multi *multi)
}
}
#endif
+
+size_t Curl_multi_max_concurrent_streams(struct Curl_multi *multi)
+{
+ return multi ? ((size_t)multi->max_concurrent_streams ?
+ (size_t)multi->max_concurrent_streams : 100) : 0;
+}
diff --git a/libs/libcurl/src/multihandle.h b/libs/libcurl/src/multihandle.h
index 279379ae0f..b65bd96386 100644
--- a/libs/libcurl/src/multihandle.h
+++ b/libs/libcurl/src/multihandle.h
@@ -133,6 +133,7 @@ struct Curl_multi {
struct curltime timer_lastcall; /* the fixed time for the timeout for the
previous callback */
bool in_callback; /* true while executing a callback */
+ long max_concurrent_streams; /* max concurrent streams the client supports */
};
#endif /* HEADER_CURL_MULTIHANDLE_H */
diff --git a/libs/libcurl/src/multiif.h b/libs/libcurl/src/multiif.h
index 0755a7cd24..75025232c4 100644
--- a/libs/libcurl/src/multiif.h
+++ b/libs/libcurl/src/multiif.h
@@ -89,4 +89,10 @@ CURLMcode Curl_multi_add_perform(struct Curl_multi *multi,
struct Curl_easy *data,
struct connectdata *conn);
+
+/* Return the value of the CURLMOPT_MAX_CONCURRENT_STREAMS option.
+ * If it is not set, or set to 0, the default of 100 is used.
+ */
+size_t Curl_multi_max_concurrent_streams(struct Curl_multi *multi);
+
#endif /* HEADER_CURL_MULTIIF_H */
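The value reported by Curl_multi_max_concurrent_streams() comes from the new public CURLMOPT_MAX_CONCURRENT_STREAMS multi option added in this release. A minimal usage sketch:

#include <curl/curl.h>

int main(void)
{
  CURLM *multi;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  multi = curl_multi_init();

  /* advertise support for at most 200 concurrent HTTP/2 streams; values
     below 1 fall back to the default of 100, as the setopt code above shows */
  curl_multi_setopt(multi, CURLMOPT_MAX_CONCURRENT_STREAMS, 200L);

  /* ... add easy handles and drive transfers as usual ... */

  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}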
diff --git a/libs/libcurl/src/netrc.c b/libs/libcurl/src/netrc.c
index c74065e813..1c9da31993 100644
--- a/libs/libcurl/src/netrc.c
+++ b/libs/libcurl/src/netrc.c
@@ -88,7 +88,7 @@ static int parsenetrc(const char *host,
if(tok && *tok == '#')
/* treat an initial hash as a comment line */
continue;
- while(!done && tok) {
+ while(tok) {
if((login && *login) && (password && *password)) {
done = TRUE;
diff --git a/libs/libcurl/src/non-ascii.c b/libs/libcurl/src/non-ascii.c
index 42beaec45d..a48e67db4a 100644
--- a/libs/libcurl/src/non-ascii.c
+++ b/libs/libcurl/src/non-ascii.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
diff --git a/libs/libcurl/src/parsedate.c b/libs/libcurl/src/parsedate.c
index 7ae5eb8cde..f4b18d091a 100644
--- a/libs/libcurl/src/parsedate.c
+++ b/libs/libcurl/src/parsedate.c
@@ -100,16 +100,20 @@ static int parsedate(const char *date, time_t *output);
#define PARSEDATE_LATER 1
#define PARSEDATE_SOONER 2
-#ifndef CURL_DISABLE_PARSEDATE
-
+#if !defined(CURL_DISABLE_PARSEDATE) || !defined(CURL_DISABLE_FTP) || \
+ !defined(CURL_DISABLE_FILE)
+/* These names are also used by FTP and FILE code */
const char * const Curl_wkday[] =
{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"};
-static const char * const weekday[] =
-{ "Monday", "Tuesday", "Wednesday", "Thursday",
- "Friday", "Saturday", "Sunday" };
const char * const Curl_month[]=
{ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
+#endif
+
+#ifndef CURL_DISABLE_PARSEDATE
+static const char * const weekday[] =
+{ "Monday", "Tuesday", "Wednesday", "Thursday",
+ "Friday", "Saturday", "Sunday" };
struct tzinfo {
char name[5];
diff --git a/libs/libcurl/src/security.c b/libs/libcurl/src/security.c
index c5e4e135df..fbfa707413 100644
--- a/libs/libcurl/src/security.c
+++ b/libs/libcurl/src/security.c
@@ -236,7 +236,7 @@ static ssize_t sec_recv(struct connectdata *conn, int sockindex,
/* Handle clear text response. */
if(conn->sec_complete == 0 || conn->data_prot == PROT_CLEAR)
- return read(fd, buffer, len);
+ return sread(fd, buffer, len);
if(conn->in_buffer.eof_flag) {
conn->in_buffer.eof_flag = 0;
diff --git a/libs/libcurl/src/setopt.c b/libs/libcurl/src/setopt.c
index 8909035a97..64c29e3336 100644
--- a/libs/libcurl/src/setopt.c
+++ b/libs/libcurl/src/setopt.c
@@ -315,7 +315,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Parse the $HOME/.netrc file
*/
arg = va_arg(param, long);
- if((arg < CURL_NETRC_IGNORED) || (arg > CURL_NETRC_REQUIRED))
+ if((arg < CURL_NETRC_IGNORED) || (arg >= CURL_NETRC_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.use_netrc = (enum CURL_NETRC_OPTION)arg;
break;
@@ -342,7 +342,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* curl/curl.h header file.
*/
arg = va_arg(param, long);
- if((arg < CURL_TIMECOND_NONE) || (arg > CURL_TIMECOND_LASTMOD))
+ if((arg < CURL_TIMECOND_NONE) || (arg >= CURL_TIMECOND_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.timecondition = (curl_TimeCond)arg;
break;
@@ -752,7 +752,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
}
else if(strcasecompare(argptr, "FLUSH")) {
/* flush cookies to file, takes care of the locking */
- Curl_flush_cookies(data, 0);
+ Curl_flush_cookies(data, FALSE);
}
else if(strcasecompare(argptr, "RELOAD")) {
/* reload cookies from file */
@@ -818,7 +818,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
if(arg >= CURL_HTTP_VERSION_2)
return CURLE_UNSUPPORTED_PROTOCOL;
#else
- if(arg > CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE)
+ if(arg >= CURL_HTTP_VERSION_LAST)
return CURLE_UNSUPPORTED_PROTOCOL;
if(arg == CURL_HTTP_VERSION_NONE)
arg = CURL_HTTP_VERSION_2TLS;
@@ -1109,7 +1109,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* How to access files over FTP.
*/
arg = va_arg(param, long);
- if((arg < CURLFTPMETHOD_DEFAULT) || (arg > CURLFTPMETHOD_SINGLECWD))
+ if((arg < CURLFTPMETHOD_DEFAULT) || (arg >= CURLFTPMETHOD_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.ftp_filemethod = (curl_ftpfile)arg;
break;
@@ -1136,7 +1136,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_FTP_SSL_CCC:
arg = va_arg(param, long);
- if((arg < CURLFTPSSL_CCC_NONE) || (arg > CURLFTPSSL_CCC_ACTIVE))
+ if((arg < CURLFTPSSL_CCC_NONE) || (arg >= CURLFTPSSL_CCC_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.ftp_ccc = (curl_ftpccc)arg;
break;
@@ -1164,7 +1164,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Set a specific auth for FTP-SSL transfers.
*/
arg = va_arg(param, long);
- if((arg < CURLFTPAUTH_DEFAULT) || (arg > CURLFTPAUTH_TLS))
+ if((arg < CURLFTPAUTH_DEFAULT) || (arg >= CURLFTPAUTH_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.ftpsslauth = (curl_ftpauth)arg;
break;
@@ -2123,7 +2123,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Make transfers attempt to use SSL/TLS.
*/
arg = va_arg(param, long);
- if((arg < CURLUSESSL_NONE) || (arg > CURLUSESSL_ALL))
+ if((arg < CURLUSESSL_NONE) || (arg >= CURLUSESSL_LAST))
return CURLE_BAD_FUNCTION_ARGUMENT;
data->set.use_ssl = (curl_usessl)arg;
break;
@@ -2500,7 +2500,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_RTSP_SERVER_CSEQ:
/* Same as the above, but for server-initiated requests */
- data->state.rtsp_next_client_CSeq = va_arg(param, long);
+ data->state.rtsp_next_server_CSeq = va_arg(param, long);
break;
case CURLOPT_INTERLEAVEDATA:
@@ -2725,7 +2725,8 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
result = Curl_setstropt(&data->set.str[STRING_ALTSVC], argptr);
if(result)
return result;
- (void)Curl_altsvc_load(data->asi, argptr);
+ if(argptr)
+ (void)Curl_altsvc_load(data->asi, argptr);
break;
case CURLOPT_ALTSVC_CTRL:
if(!data->asi) {
diff --git a/libs/libcurl/src/setup-os400.h b/libs/libcurl/src/setup-os400.h
index a3c2a7bdc9..629fd94c46 100644
--- a/libs/libcurl/src/setup-os400.h
+++ b/libs/libcurl/src/setup-os400.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -206,11 +206,15 @@ extern int Curl_os400_sendto(int sd, char *buffer, int buflen, int flags,
struct sockaddr * dstaddr, int addrlen);
extern int Curl_os400_recvfrom(int sd, char *buffer, int buflen, int flags,
struct sockaddr *fromaddr, int *addrlen);
+extern int Curl_os400_getpeername(int sd, struct sockaddr *addr, int *addrlen);
+extern int Curl_os400_getsockname(int sd, struct sockaddr *addr, int *addrlen);
#define connect Curl_os400_connect
#define bind Curl_os400_bind
#define sendto Curl_os400_sendto
#define recvfrom Curl_os400_recvfrom
+#define getpeername Curl_os400_getpeername
+#define getsockname Curl_os400_getsockname
#ifdef HAVE_LIBZ
#define zlibVersion Curl_os400_zlibVersion
diff --git a/libs/libcurl/src/smb.c b/libs/libcurl/src/smb.c
index f66c05ca42..12f99257fc 100644
--- a/libs/libcurl/src/smb.c
+++ b/libs/libcurl/src/smb.c
@@ -682,7 +682,8 @@ static CURLcode smb_connection_state(struct connectdata *conn, bool *done)
switch(smbc->state) {
case SMB_NEGOTIATE:
- if(h->status || smbc->got < sizeof(*nrsp) + sizeof(smbc->challenge) - 1) {
+ if((smbc->got < sizeof(*nrsp) + sizeof(smbc->challenge) - 1) ||
+ h->status) {
connclose(conn, "SMB: negotiation failed");
return CURLE_COULDNT_CONNECT;
}
diff --git a/libs/libcurl/src/socketpair.c b/libs/libcurl/src/socketpair.c
new file mode 100644
index 0000000000..1f0e2e4a4f
--- /dev/null
+++ b/libs/libcurl/src/socketpair.c
@@ -0,0 +1,118 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+#include "socketpair.h"
+
+#ifndef HAVE_SOCKETPAIR
+#ifdef WIN32
+/*
+ * This is a socketpair() implementation for Windows.
+ */
+#include <string.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#include <io.h>
+#else
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h> /* IPPROTO_TCP */
+#endif
+#ifndef INADDR_LOOPBACK
+#define INADDR_LOOPBACK 0x7f000001
+#endif /* !INADDR_LOOPBACK */
+#endif /* !WIN32 */
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+int Curl_socketpair(int domain, int type, int protocol,
+ curl_socket_t socks[2])
+{
+ union {
+ struct sockaddr_in inaddr;
+ struct sockaddr addr;
+ } a;
+ curl_socket_t listener;
+ curl_socklen_t addrlen = sizeof(a.inaddr);
+ int reuse = 1;
+ char data[2][12];
+ ssize_t dlen;
+ (void)domain;
+ (void)type;
+ (void)protocol;
+
+ listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if(listener == CURL_SOCKET_BAD)
+ return -1;
+
+ memset(&a, 0, sizeof(a));
+ a.inaddr.sin_family = AF_INET;
+ a.inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ a.inaddr.sin_port = 0;
+
+ socks[0] = socks[1] = CURL_SOCKET_BAD;
+
+ if(setsockopt(listener, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&reuse, (curl_socklen_t)sizeof(reuse)) == -1)
+ goto error;
+ if(bind(listener, &a.addr, sizeof(a.inaddr)) == -1)
+ goto error;
+ if(getsockname(listener, &a.addr, &addrlen) == -1)
+ goto error;
+ if(listen(listener, 1) == -1)
+ goto error;
+ socks[0] = socket(AF_INET, SOCK_STREAM, 0);
+ if(socks[0] == CURL_SOCKET_BAD)
+ goto error;
+ if(connect(socks[0], &a.addr, sizeof(a.inaddr)) == -1)
+ goto error;
+ socks[1] = accept(listener, NULL, NULL);
+ if(socks[1] == CURL_SOCKET_BAD)
+ goto error;
+
+ /* verify that nothing else connected */
+ msnprintf(data[0], sizeof(data[0]), "%p", socks);
+ dlen = strlen(data[0]);
+ if(swrite(socks[0], data[0], dlen) != dlen)
+ goto error;
+ if(sread(socks[1], data[1], sizeof(data[1])) != dlen)
+ goto error;
+ if(memcmp(data[0], data[1], dlen))
+ goto error;
+
+ sclose(listener);
+ return 0;
+
+ error:
+ sclose(listener);
+ sclose(socks[0]);
+ sclose(socks[1]);
+ return -1;
+}
+
+#endif /* ! HAVE_SOCKETPAIR */
diff --git a/libs/libcurl/src/socketpair.h b/libs/libcurl/src/socketpair.h
new file mode 100644
index 0000000000..be9fb24f97
--- /dev/null
+++ b/libs/libcurl/src/socketpair.h
@@ -0,0 +1,36 @@
+#ifndef HEADER_CURL_SOCKETPAIR_H
+#define HEADER_CURL_SOCKETPAIR_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+#ifndef HAVE_SOCKETPAIR
+int Curl_socketpair(int domain, int type, int protocol,
+ curl_socket_t socks[2]);
+#else
+#define Curl_socketpair(a,b,c,d) socketpair(a,b,c,d)
+#endif
+
+/* Defined here to allow specific build configs to disable it completely */
+#define USE_SOCKETPAIR 1
+
+#endif /* HEADER_CURL_SOCKETPAIR_H */
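When HAVE_SOCKETPAIR is defined, Curl_socketpair() is simply socketpair(); only platforms without it (notably Windows) use the loopback-TCP emulation in socketpair.c above. The short POSIX program below only demonstrates the property the pair provides, namely that one end can be written and the other read, which is what makes such a pair usable as an internal wakeup channel; it is a plain socketpair() demo, not libcurl code.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
  int socks[2];
  char buf[16];

  /* on these platforms Curl_socketpair(a,b,c,d) expands to this call */
  if(socketpair(AF_UNIX, SOCK_STREAM, 0, socks) == -1) {
    perror("socketpair");
    return 1;
  }

  /* anything written on one end can be read from the other end */
  if(write(socks[0], "ping", 4) == 4) {
    ssize_t n = read(socks[1], buf, sizeof(buf));
    if(n > 0)
      printf("got %.*s\n", (int)n, buf);
  }

  close(socks[0]);
  close(socks[1]);
  return 0;
}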
diff --git a/libs/libcurl/src/socks.c b/libs/libcurl/src/socks.c
index d8fcc3bbba..6ae98184d1 100644
--- a/libs/libcurl/src/socks.c
+++ b/libs/libcurl/src/socks.c
@@ -38,7 +38,9 @@
#include "timeval.h"
#include "socks.h"
-/* The last #include file should be: */
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
#include "memdebug.h"
/*
@@ -372,8 +374,9 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
o REP Reply field:
o X'00' succeeded
*/
-
- unsigned char socksreq[600]; /* room for large user/pw (255 max each) */
+#define REQUEST_BUFSIZE 600 /* room for large user/pw (255 max each) */
+ unsigned char socksreq[REQUEST_BUFSIZE];
+ char dest[REQUEST_BUFSIZE] = "unknown"; /* printable hostname:port */
int idx;
ssize_t actualread;
ssize_t written;
@@ -605,6 +608,8 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
socksreq[len++] = (char) hostname_len; /* address length */
memcpy(&socksreq[len], hostname, hostname_len); /* address str w/o NULL */
len += hostname_len;
+ msnprintf(dest, sizeof(dest), "%s:%d", hostname, remote_port);
+ infof(data, "SOCKS5 connect to %s (remotely resolved)\n", dest);
}
else {
struct Curl_dns_entry *dns;
@@ -628,8 +633,13 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
if(dns)
hp = dns->addr;
if(hp) {
- char buf[64];
- Curl_printable_address(hp, buf, sizeof(buf));
+ if(Curl_printable_address(hp, dest, sizeof(dest))) {
+ size_t destlen = strlen(dest);
+ msnprintf(dest + destlen, sizeof(dest) - destlen, ":%d", remote_port);
+ }
+ else {
+ strcpy(dest, "unknown");
+ }
if(hp->ai_family == AF_INET) {
int i;
@@ -641,7 +651,7 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
socksreq[len++] = ((unsigned char *)&saddr_in->sin_addr.s_addr)[i];
}
- infof(data, "SOCKS5 connect to IPv4 %s (locally resolved)\n", buf);
+ infof(data, "SOCKS5 connect to IPv4 %s (locally resolved)\n", dest);
}
#ifdef ENABLE_IPV6
else if(hp->ai_family == AF_INET6) {
@@ -655,13 +665,13 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
((unsigned char *)&saddr_in6->sin6_addr.s6_addr)[i];
}
- infof(data, "SOCKS5 connect to IPv6 %s (locally resolved)\n", buf);
+ infof(data, "SOCKS5 connect to IPv6 %s (locally resolved)\n", dest);
}
#endif
else {
hp = NULL; /* fail! */
- failf(data, "SOCKS5 connection to %s not supported\n", buf);
+ failf(data, "SOCKS5 connection to %s not supported\n", dest);
}
Curl_resolv_unlock(data, dns); /* not used anymore from now on */
@@ -756,42 +766,8 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
#endif
if(socksreq[1] != 0) { /* Anything besides 0 is an error */
- if(socksreq[3] == 1) {
- failf(data,
- "Can't complete SOCKS5 connection to %d.%d.%d.%d:%d. (%d)",
- (unsigned char)socksreq[4], (unsigned char)socksreq[5],
- (unsigned char)socksreq[6], (unsigned char)socksreq[7],
- (((unsigned char)socksreq[8] << 8) |
- (unsigned char)socksreq[9]),
- (unsigned char)socksreq[1]);
- }
- else if(socksreq[3] == 3) {
- unsigned char port_upper = (unsigned char)socksreq[len - 2];
- socksreq[len - 2] = 0;
- failf(data,
- "Can't complete SOCKS5 connection to %s:%d. (%d)",
- (char *)&socksreq[5],
- ((port_upper << 8) |
- (unsigned char)socksreq[len - 1]),
- (unsigned char)socksreq[1]);
- socksreq[len - 2] = port_upper;
- }
- else if(socksreq[3] == 4) {
- failf(data,
- "Can't complete SOCKS5 connection to %02x%02x:%02x%02x:"
- "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%d. (%d)",
- (unsigned char)socksreq[4], (unsigned char)socksreq[5],
- (unsigned char)socksreq[6], (unsigned char)socksreq[7],
- (unsigned char)socksreq[8], (unsigned char)socksreq[9],
- (unsigned char)socksreq[10], (unsigned char)socksreq[11],
- (unsigned char)socksreq[12], (unsigned char)socksreq[13],
- (unsigned char)socksreq[14], (unsigned char)socksreq[15],
- (unsigned char)socksreq[16], (unsigned char)socksreq[17],
- (unsigned char)socksreq[18], (unsigned char)socksreq[19],
- (((unsigned char)socksreq[20] << 8) |
- (unsigned char)socksreq[21]),
- (unsigned char)socksreq[1]);
- }
+ failf(data, "Can't complete SOCKS5 connection to %s. (%d)",
+ dest, (unsigned char)socksreq[1]);
return CURLE_COULDNT_CONNECT;
}
infof(data, "SOCKS5 request granted.\n");
diff --git a/libs/libcurl/src/strcase.c b/libs/libcurl/src/strcase.c
index 24bcca9327..a894712254 100644
--- a/libs/libcurl/src/strcase.c
+++ b/libs/libcurl/src/strcase.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -93,6 +93,75 @@ char Curl_raw_toupper(char in)
return in;
}
+
+/* Portable, consistent tolower (remember EBCDIC). Do not use tolower() because
+ its behavior is altered by the current locale. */
+char Curl_raw_tolower(char in)
+{
+#if !defined(CURL_DOES_CONVERSIONS)
+ if(in >= 'A' && in <= 'Z')
+ return (char)('a' + in - 'A');
+#else
+ switch(in) {
+ case 'A':
+ return 'a';
+ case 'B':
+ return 'b';
+ case 'C':
+ return 'c';
+ case 'D':
+ return 'd';
+ case 'E':
+ return 'e';
+ case 'F':
+ return 'f';
+ case 'G':
+ return 'g';
+ case 'H':
+ return 'h';
+ case 'I':
+ return 'i';
+ case 'J':
+ return 'j';
+ case 'K':
+ return 'k';
+ case 'L':
+ return 'l';
+ case 'M':
+ return 'm';
+ case 'N':
+ return 'n';
+ case 'O':
+ return 'o';
+ case 'P':
+ return 'p';
+ case 'Q':
+ return 'q';
+ case 'R':
+ return 'r';
+ case 'S':
+ return 's';
+ case 'T':
+ return 't';
+ case 'U':
+ return 'u';
+ case 'V':
+ return 'v';
+ case 'W':
+ return 'w';
+ case 'X':
+ return 'x';
+ case 'Y':
+ return 'y';
+ case 'Z':
+ return 'z';
+ }
+#endif
+
+ return in;
+}
+
+
/*
* Curl_strcasecompare() is for doing "raw" case insensitive strings. This is
* meant to be locale independent and only compare strings we know are safe
@@ -165,6 +234,21 @@ void Curl_strntoupper(char *dest, const char *src, size_t n)
} while(*src++ && --n);
}
+/* Copy a lower case version of the string from src to dest. The
+ * strings may overlap. No more than n characters of the string are copied
+ * (including any NUL) and the destination string will NOT be
+ * NUL-terminated if that limit is reached.
+ */
+void Curl_strntolower(char *dest, const char *src, size_t n)
+{
+ if(n < 1)
+ return;
+
+ do {
+ *dest++ = Curl_raw_tolower(*src);
+ } while(*src++ && --n);
+}
+
/* --- public functions --- */
int curl_strequal(const char *first, const char *second)
diff --git a/libs/libcurl/src/strcase.h b/libs/libcurl/src/strcase.h
index 6fee3840e6..baa768b2b2 100644
--- a/libs/libcurl/src/strcase.h
+++ b/libs/libcurl/src/strcase.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -40,11 +40,13 @@ int Curl_safe_strcasecompare(const char *first, const char *second);
int Curl_strncasecompare(const char *first, const char *second, size_t max);
char Curl_raw_toupper(char in);
+char Curl_raw_tolower(char in);
/* checkprefix() is a shorter version of the above, used when the first
argument is zero-byte terminated */
#define checkprefix(a,b) curl_strnequal(a,b,strlen(a))
void Curl_strntoupper(char *dest, const char *src, size_t n);
+void Curl_strntolower(char *dest, const char *src, size_t n);
#endif /* HEADER_CURL_STRCASE_H */
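Curl_strntolower() mirrors the existing Curl_strntoupper() contract: locale-independent, at most n bytes copied, and no NUL appended when the limit is reached. The http2.c hunk earlier uses it to lower-case header names in place before handing them to nghttp2. The self-contained illustration below reproduces that in-place use; raw_tolower(), strntolower() and the sample header are local stand-ins for this example, not the libcurl symbols themselves.

#include <stdio.h>
#include <string.h>

/* locale-independent ASCII tolower, same idea as Curl_raw_tolower() */
static char raw_tolower(char in)
{
  return (in >= 'A' && in <= 'Z') ? (char)('a' + in - 'A') : in;
}

/* same contract as Curl_strntolower(): src and dest may overlap, at most n
   bytes are written, no NUL is added if the limit is reached */
static void strntolower(char *dest, const char *src, size_t n)
{
  if(n < 1)
    return;
  do {
    *dest++ = raw_tolower(*src);
  } while(*src++ && --n);
}

int main(void)
{
  char header[] = "X-Custom-Header: value";
  size_t namelen = strcspn(header, ":");

  /* lower-case only the name part, in place, like the HTTP/2 send path */
  strntolower(header, header, namelen);
  printf("%s\n", header);   /* -> x-custom-header: value */
  return 0;
}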
diff --git a/libs/libcurl/src/transfer.c b/libs/libcurl/src/transfer.c
index ef0d806384..d0d4aeb500 100644
--- a/libs/libcurl/src/transfer.c
+++ b/libs/libcurl/src/transfer.c
@@ -776,14 +776,14 @@ static CURLcode readwrite_data(struct Curl_easy *data,
* and writes away the data. The returned 'nread' holds the number
* of bytes it actually wrote to the client.
*/
-
+ CURLcode extra;
CHUNKcode res =
- Curl_httpchunk_read(conn, k->str, nread, &nread);
+ Curl_httpchunk_read(conn, k->str, nread, &nread, &extra);
if(CHUNKE_OK < res) {
- if(CHUNKE_WRITE_ERROR == res) {
- failf(data, "Failed writing data");
- return CURLE_WRITE_ERROR;
+ if(CHUNKE_PASSTHRU_ERROR == res) {
+ failf(data, "Failed reading the chunked-encoded stream");
+ return extra;
}
failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
return CURLE_RECV_ERROR;
@@ -1510,6 +1510,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
}
}
#endif
+ Curl_http2_init_state(&data->state);
}
return result;
@@ -1591,7 +1592,8 @@ CURLcode Curl_follow(struct Curl_easy *data,
DEBUGASSERT(data->state.uh);
uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
- (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME : 0);
+ (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
+ ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) );
if(uc) {
if(type != FOLLOW_FAKE)
return Curl_uc_to_curlcode(uc);
diff --git a/libs/libcurl/src/url.c b/libs/libcurl/src/url.c
index b7cf7bedd3..8285474fd7 100644
--- a/libs/libcurl/src/url.c
+++ b/libs/libcurl/src/url.c
@@ -317,13 +317,17 @@ static void up_free(struct Curl_easy *data)
* when curl_easy_perform() is invoked.
*/
-CURLcode Curl_close(struct Curl_easy *data)
+CURLcode Curl_close(struct Curl_easy **datap)
{
struct Curl_multi *m;
+ struct Curl_easy *data;
- if(!data)
+ if(!datap || !*datap)
return CURLE_OK;
+ data = *datap;
+ *datap = NULL;
+
Curl_expire_clear(data); /* shut off timers */
m = data->multi;
@@ -374,7 +378,7 @@ CURLcode Curl_close(struct Curl_easy *data)
Curl_safefree(data->state.buffer);
Curl_safefree(data->state.headerbuff);
Curl_safefree(data->state.ulbuf);
- Curl_flush_cookies(data, 1);
+ Curl_flush_cookies(data, TRUE);
#ifdef USE_ALTSVC
Curl_altsvc_save(data->asi, data->set.str[STRING_ALTSVC]);
Curl_altsvc_cleanup(data->asi);
@@ -399,6 +403,10 @@ CURLcode Curl_close(struct Curl_easy *data)
Curl_share_unlock(data, CURL_LOCK_DATA_SHARE);
}
+ free(data->req.doh.probe[0].serverdoh.memory);
+ free(data->req.doh.probe[1].serverdoh.memory);
+ curl_slist_free_all(data->req.doh.headers);
+
/* destruct wildcard structures if it is needed */
Curl_wildcard_dtor(&data->wildcard);
Curl_freeset(data);
@@ -612,8 +620,6 @@ CURLcode Curl_open(struct Curl_easy **curl)
data->progress.flags |= PGRS_HIDE;
data->state.current_speed = -1; /* init to negative == impossible */
-
- Curl_http2_init_state(&data->state);
}
}
@@ -1041,7 +1047,7 @@ ConnectionExists(struct Curl_easy *data,
/* We can't multiplex if we don't know anything about the server */
if(canmultiplex) {
if(bundle->multiuse == BUNDLE_UNKNOWN) {
- if((bundle->multiuse == BUNDLE_UNKNOWN) && data->set.pipewait) {
+ if(data->set.pipewait) {
infof(data, "Server doesn't support multiplex yet, wait\n");
*waitpipe = TRUE;
Curl_conncache_unlock(data);
@@ -1277,8 +1283,14 @@ ConnectionExists(struct Curl_easy *data,
partway through a handshake!) */
if(wantNTLMhttp) {
if(strcmp(needle->user, check->user) ||
- strcmp(needle->passwd, check->passwd))
+ strcmp(needle->passwd, check->passwd)) {
+
+ /* we prefer a credential match, but this is at least a connection
+ that can be reused and "upgraded" to NTLM */
+ if(check->http_ntlm_state == NTLMSTATE_NONE)
+ chosen = check;
continue;
+ }
}
else if(check->http_ntlm_state != NTLMSTATE_NONE) {
/* Connection is using NTLM auth but we don't want NTLM */
@@ -1787,6 +1799,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
}
if(!data->set.uh) {
+ char *newurl;
uc = curl_url_set(uh, CURLUPART_URL, data->change.url,
CURLU_GUESS_SCHEME |
CURLU_NON_SUPPORT_SCHEME |
@@ -1797,6 +1810,15 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
DEBUGF(infof(data, "curl_url_set rejected %s\n", data->change.url));
return Curl_uc_to_curlcode(uc);
}
+
+ /* after it was parsed, get the generated normalized version */
+ uc = curl_url_get(uh, CURLUPART_URL, &newurl, 0);
+ if(uc)
+ return Curl_uc_to_curlcode(uc);
+ if(data->change.url_alloc)
+ free(data->change.url);
+ data->change.url = newurl;
+ data->change.url_alloc = TRUE;
}
uc = curl_url_get(uh, CURLUPART_SCHEME, &data->state.up.scheme, 0);
@@ -1863,11 +1885,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
(void)curl_url_get(uh, CURLUPART_QUERY, &data->state.up.query, 0);
hostname = data->state.up.hostname;
- if(!hostname)
- /* this is for file:// transfers, get a dummy made */
- hostname = (char *)"";
-
- if(hostname[0] == '[') {
+ if(hostname && hostname[0] == '[') {
/* This looks like an IPv6 address literal. See if there is an address
scope. */
size_t hlen;
@@ -1881,7 +1899,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
}
/* make sure the connect struct gets its own copy of the host name */
- conn->host.rawalloc = strdup(hostname);
+ conn->host.rawalloc = strdup(hostname ? hostname : "");
if(!conn->host.rawalloc)
return CURLE_OUT_OF_MEMORY;
conn->host.name = conn->host.rawalloc;
@@ -1969,6 +1987,8 @@ void Curl_free_request_state(struct Curl_easy *data)
{
Curl_safefree(data->req.protop);
Curl_safefree(data->req.newurl);
+ Curl_close(&data->req.doh.probe[0].easy);
+ Curl_close(&data->req.doh.probe[1].easy);
}
@@ -2754,13 +2774,6 @@ static CURLcode set_login(struct connectdata *conn)
result = CURLE_OUT_OF_MEMORY;
}
- /* if there's a user without password, consider password blank */
- if(conn->user && !conn->passwd) {
- conn->passwd = strdup("");
- if(!conn->passwd)
- result = CURLE_OUT_OF_MEMORY;
- }
-
return result;
}
@@ -3519,6 +3532,10 @@ static CURLcode create_conn(struct Curl_easy *data,
data->set.str[STRING_SSL_CIPHER13_LIST_ORIG];
data->set.proxy_ssl.primary.cipher_list13 =
data->set.str[STRING_SSL_CIPHER13_LIST_PROXY];
+ data->set.ssl.primary.pinned_key =
+ data->set.str[STRING_SSL_PINNEDPUBLICKEY_ORIG];
+ data->set.proxy_ssl.primary.pinned_key =
+ data->set.str[STRING_SSL_PINNEDPUBLICKEY_PROXY];
data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
@@ -3815,7 +3832,9 @@ CURLcode Curl_setup_conn(struct connectdata *conn,
}
else {
Curl_pgrsTime(data, TIMER_CONNECT); /* we're connected already */
- Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
+ if(conn->ssl[FIRSTSOCKET].use ||
+ (conn->handler->protocol & PROTO_FAMILY_SSH))
+ Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
conn->bits.tcpconnect[FIRSTSOCKET] = TRUE;
*protocol_done = TRUE;
Curl_updateconninfo(conn, conn->sock[FIRSTSOCKET]);
diff --git a/libs/libcurl/src/url.h b/libs/libcurl/src/url.h
index f4d611adda..053fbdffc2 100644
--- a/libs/libcurl/src/url.h
+++ b/libs/libcurl/src/url.h
@@ -49,7 +49,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data);
void Curl_freeset(struct Curl_easy * data);
CURLcode Curl_uc_to_curlcode(CURLUcode uc);
-CURLcode Curl_close(struct Curl_easy *data); /* opposite of curl_open() */
+CURLcode Curl_close(struct Curl_easy **datap); /* opposite of curl_open() */
CURLcode Curl_connect(struct Curl_easy *, bool *async, bool *protocol_connect);
CURLcode Curl_disconnect(struct Curl_easy *data,
struct connectdata *, bool dead_connection);
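Turning Curl_close() into a struct Curl_easy ** function lets it reset the caller's pointer, which is why the call sites above now pass &handle and no longer assign NULL themselves. A tiny generic sketch of that close-and-NULL pattern, with a stand-in handle type rather than the real struct Curl_easy:

#include <stdio.h>
#include <stdlib.h>

struct handle {
  int id;
};

/* close-and-NULL pattern: the callee frees the object *and* resets the
   caller's pointer, which is what the Curl_close(struct Curl_easy **)
   signature change achieves */
static void handle_close(struct handle **hp)
{
  if(!hp || !*hp)
    return;            /* closing a NULL pointer is a harmless no-op */
  free(*hp);
  *hp = NULL;          /* the caller cannot reuse the stale pointer */
}

int main(void)
{
  struct handle *h = calloc(1, sizeof(*h));

  handle_close(&h);
  printf("after close: %p\n", (void *)h);   /* prints a NULL pointer */
  handle_close(&h);                         /* safe to call again */
  return 0;
}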
diff --git a/libs/libcurl/src/urlapi.c b/libs/libcurl/src/urlapi.c
index a0ee331dab..fa514bce53 100644
--- a/libs/libcurl/src/urlapi.c
+++ b/libs/libcurl/src/urlapi.c
@@ -64,6 +64,7 @@ struct Curl_URL {
char *fragment;
char *scratch; /* temporary scratch area */
+ char *temppath; /* temporary path pointer */
long portnum; /* the numerical version */
};
@@ -82,6 +83,7 @@ static void free_urlhandle(struct Curl_URL *u)
free(u->query);
free(u->fragment);
free(u->scratch);
+ free(u->temppath);
}
/* move the full contents of one handle onto another and
@@ -351,7 +353,7 @@ static char *concat_url(const char *base, const char *relurl)
else {
/* We got a new absolute path for this server */
- if((relurl[0] == '/') && (relurl[1] == '/')) {
+ if(relurl[1] == '/') {
/* the new URL starts with //, just keep the protocol part from the
original one */
*protsep = 0;
@@ -596,8 +598,12 @@ static CURLUcode hostname_check(struct Curl_URL *u, char *hostname)
size_t hlen = strlen(hostname);
if(hostname[0] == '[') {
+#ifdef ENABLE_IPV6
char dest[16]; /* fits a binary IPv6 address */
+#endif
const char *l = "0123456789abcdefABCDEF:.";
+ if(hlen < 5) /* '[::1]' is the shortest possible valid string */
+ return CURLUE_MALFORMED_INPUT;
hostname++;
hlen -= 2;
@@ -784,6 +790,7 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
if(junkscan(schemep))
return CURLUE_MALFORMED_INPUT;
+
}
else {
/* no scheme! */
@@ -804,11 +811,14 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
p++;
len = p - hostp;
- if(!len)
- return CURLUE_MALFORMED_INPUT;
-
- memcpy(hostname, hostp, len);
- hostname[len] = 0;
+ if(len) {
+ memcpy(hostname, hostp, len);
+ hostname[len] = 0;
+ }
+ else {
+ if(!(flags & CURLU_NO_AUTHORITY))
+ return CURLUE_MALFORMED_INPUT;
+ }
if((flags & CURLU_GUESS_SCHEME) && !schemep) {
/* legacy curl-style guess based on host name */
@@ -843,35 +853,60 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
if(junkscan(path))
return CURLUE_MALFORMED_INPUT;
- query = strchr(path, '?');
- if(query)
- *query++ = 0;
+ if((flags & CURLU_URLENCODE) && path[0]) {
+ /* worst case output length is 3x the original! */
+ char *newp = malloc(strlen(path) * 3);
+ if(!newp)
+ return CURLUE_OUT_OF_MEMORY;
+ path_alloced = TRUE;
+ strcpy_url(newp, path, TRUE); /* consider it relative */
+ u->temppath = path = newp;
+ }
- fragment = strchr(query?query:path, '#');
- if(fragment)
+ fragment = strchr(path, '#');
+ if(fragment) {
*fragment++ = 0;
+ if(fragment[0]) {
+ u->fragment = strdup(fragment);
+ if(!u->fragment)
+ return CURLUE_OUT_OF_MEMORY;
+ }
+ }
+
+ query = strchr(path, '?');
+ if(query) {
+ *query++ = 0;
+ /* done even if the query part is a blank string */
+ u->query = strdup(query);
+ if(!u->query)
+ return CURLUE_OUT_OF_MEMORY;
+ }
if(!path[0])
- /* if there's no path set, unset */
+ /* if there's no path left set, unset */
path = NULL;
- else if(!(flags & CURLU_PATH_AS_IS)) {
- /* sanitise paths and remove ../ and ./ sequences according to RFC3986 */
- char *newp = Curl_dedotdotify(path);
- if(!newp)
- return CURLUE_OUT_OF_MEMORY;
+ else {
+ if(!(flags & CURLU_PATH_AS_IS)) {
+ /* remove ../ and ./ sequences according to RFC3986 */
+ char *newp = Curl_dedotdotify(path);
+ if(!newp)
+ return CURLUE_OUT_OF_MEMORY;
- if(strcmp(newp, path)) {
- /* if we got a new version */
- path = newp;
- path_alloced = TRUE;
+ if(strcmp(newp, path)) {
+ /* if we got a new version */
+ if(path_alloced)
+ Curl_safefree(u->temppath);
+ u->temppath = path = newp;
+ path_alloced = TRUE;
+ }
+ else
+ free(newp);
}
- else
- free(newp);
- }
- if(path) {
+
u->path = path_alloced?path:strdup(path);
if(!u->path)
return CURLUE_OUT_OF_MEMORY;
+ u->temppath = NULL; /* used now */
}
if(hostname) {
@@ -889,28 +924,22 @@ static CURLUcode seturl(const char *url, CURLU *u, unsigned int flags)
if(result)
return result;
- result = hostname_check(u, hostname);
- if(result)
- return result;
+ if(0 == strlen(hostname) && (flags & CURLU_NO_AUTHORITY)) {
+ /* Skip hostname check, it's allowed to be empty. */
+ }
+ else {
+ result = hostname_check(u, hostname);
+ if(result)
+ return result;
+ }
u->host = strdup(hostname);
if(!u->host)
return CURLUE_OUT_OF_MEMORY;
}
- if(query) {
- u->query = strdup(query);
- if(!u->query)
- return CURLUE_OUT_OF_MEMORY;
- }
- if(fragment && fragment[0]) {
- u->fragment = strdup(fragment);
- if(!u->fragment)
- return CURLUE_OUT_OF_MEMORY;
- }
-
- free(u->scratch);
- u->scratch = NULL;
+ Curl_safefree(u->scratch);
+ Curl_safefree(u->temppath);
return CURLUE_OK;
}
@@ -1075,24 +1104,23 @@ CURLUcode curl_url_get(CURLU *u, CURLUPart what,
else
return CURLUE_NO_SCHEME;
- if(scheme) {
- h = Curl_builtin_scheme(scheme);
- if(!port && (flags & CURLU_DEFAULT_PORT)) {
- /* there's no stored port number, but asked to deliver
- a default one for the scheme */
- if(h) {
- msnprintf(portbuf, sizeof(portbuf), "%ld", h->defport);
- port = portbuf;
- }
- }
- else if(port) {
- /* there is a stored port number, but asked to inhibit if it matches
- the default one for the scheme */
- if(h && (h->defport == u->portnum) &&
- (flags & CURLU_NO_DEFAULT_PORT))
- port = NULL;
+ h = Curl_builtin_scheme(scheme);
+ if(!port && (flags & CURLU_DEFAULT_PORT)) {
+ /* there's no stored port number, but asked to deliver
+ a default one for the scheme */
+ if(h) {
+ msnprintf(portbuf, sizeof(portbuf), "%ld", h->defport);
+ port = portbuf;
}
}
+ else if(port) {
+ /* there is a stored port number, but asked to inhibit if it matches
+ the default one for the scheme */
+ if(h && (h->defport == u->portnum) &&
+ (flags & CURLU_NO_DEFAULT_PORT))
+ port = NULL;
+ }
+
if(h && !(h->flags & PROTOPT_URLOPTIONS))
options = NULL;
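
The restructured block above no longer needs the old if(scheme) guard, since the code before it either supplies a scheme or returns CURLUE_NO_SCHEME. As a minimal sketch of how the two port flags handled here behave from the public API side (the URL is only an example, error handling trimmed):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLU *h = curl_url();
  char *port = NULL;

  curl_url_set(h, CURLUPART_URL, "https://example.com/", 0);

  /* no port in the URL: CURLU_DEFAULT_PORT asks for the scheme default */
  if(!curl_url_get(h, CURLUPART_PORT, &port, CURLU_DEFAULT_PORT)) {
    printf("port: %s\n", port);   /* expected to print 443 */
    curl_free(port);
  }
  /* conversely, extracting CURLUPART_URL with CURLU_NO_DEFAULT_PORT is meant
     to omit a stored port that matches the scheme default */
  curl_url_cleanup(h);
  return 0;
}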
@@ -1340,7 +1368,8 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what,
default:
return CURLUE_UNKNOWN_PART;
}
- if(storep) {
+ DEBUGASSERT(storep);
+ {
const char *newp = part;
size_t nalloc = strlen(part);
@@ -1432,9 +1461,14 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what,
}
if(what == CURLUPART_HOST) {
- if(hostname_check(u, (char *)newp)) {
- free((char *)newp);
- return CURLUE_MALFORMED_INPUT;
+ if(0 == strlen(newp) && (flags & CURLU_NO_AUTHORITY)) {
+ /* Skip hostname check, it's allowed to be empty. */
+ }
+ else {
+ if(hostname_check(u, (char *)newp)) {
+ free((char *)newp);
+ return CURLUE_MALFORMED_INPUT;
+ }
}
}
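
The two CURLU_NO_AUTHORITY branches above (in seturl and curl_url_set) relax the rule that the authority must contain a non-empty host name. A rough sketch of how an application might opt in; the scheme and URL are made up, and CURLU_NON_SUPPORT_SCHEME is assumed to be needed for schemes libcurl does not recognize:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLU *h = curl_url();
  CURLUcode rc;

  /* hypothetical custom-scheme URL with an empty authority part */
  rc = curl_url_set(h, CURLUPART_URL, "myscheme:///only/a/path",
                    CURLU_NON_SUPPORT_SCHEME | CURLU_NO_AUTHORITY);
  if(rc)
    printf("parse failed: %d\n", (int)rc);
  else {
    char *path = NULL;
    if(!curl_url_get(h, CURLUPART_PATH, &path, 0)) {
      printf("path: %s\n", path);  /* expected "/only/a/path" */
      curl_free(path);
    }
  }
  curl_url_cleanup(h);
  return 0;
}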
diff --git a/libs/libcurl/src/urldata.h b/libs/libcurl/src/urldata.h
index acc1fd1b9d..f9365b2e68 100644
--- a/libs/libcurl/src/urldata.h
+++ b/libs/libcurl/src/urldata.h
@@ -68,6 +68,7 @@
#define PROTO_FAMILY_POP3 (CURLPROTO_POP3|CURLPROTO_POP3S)
#define PROTO_FAMILY_SMB (CURLPROTO_SMB|CURLPROTO_SMBS)
#define PROTO_FAMILY_SMTP (CURLPROTO_SMTP|CURLPROTO_SMTPS)
+#define PROTO_FAMILY_SSH (CURLPROTO_SCP|CURLPROTO_SFTP)
#define DEFAULT_CONNCACHE_SIZE 5
@@ -158,7 +159,13 @@ typedef ssize_t (Curl_recv)(struct connectdata *conn, /* connection data */
((x) && ((x)->magic == CURLEASY_MAGIC_NUMBER))
/* the type we use for storing a single boolean bit */
+#ifdef _MSC_VER
+typedef bool bit;
+#define BIT(x) bool x
+#else
typedef unsigned int bit;
+#define BIT(x) bit x:1
+#endif
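
A quick illustration of the new macro with a hypothetical struct (not from the patch): only the expansion differs between compilers, so call sites read the same either way. The MSVC branch falls back to a plain bool, presumably to avoid trouble with one-bit bool bitfields on that compiler.

/* sketch of how a flags struct written with BIT() expands */
struct example_flags {
  BIT(connected);  /* non-MSVC: 'bit connected:1;'   MSVC: 'bool connected;' */
  BIT(verbose);
};

static void example_flags_demo(void)
{
  struct example_flags f = {0};
  f.connected = 1;          /* usage is unchanged regardless of expansion */
  if(f.connected && !f.verbose) {
    /* ... */
  }
}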
#ifdef HAVE_GSSAPI
/* Types needed for krb5-ftp connections */
@@ -166,7 +173,7 @@ struct krb5buffer {
void *data;
size_t size;
size_t index;
- bit eof_flag:1;
+ BIT(eof_flag);
};
enum protection_level {
@@ -209,7 +216,7 @@ struct ssl_connect_data {
#if defined(USE_SSL)
struct ssl_backend_data *backend;
#endif
- bit use:1;
+ BIT(use);
};
struct ssl_primary_config {
@@ -222,10 +229,11 @@ struct ssl_primary_config {
char *egdsocket; /* path to file containing the EGD daemon socket */
char *cipher_list; /* list of ciphers to use */
char *cipher_list13; /* list of TLS 1.3 cipher suites to use */
- bit verifypeer:1; /* set TRUE if this is desired */
- bit verifyhost:1; /* set TRUE if CN/SAN must match hostname */
- bit verifystatus:1; /* set TRUE if certificate status must be checked */
- bit sessionid:1; /* cache session IDs or not */
+ char *pinned_key;
+ BIT(verifypeer); /* set TRUE if this is desired */
+ BIT(verifyhost); /* set TRUE if CN/SAN must match hostname */
+ BIT(verifystatus); /* set TRUE if certificate status must be checked */
+ BIT(sessionid); /* cache session IDs or not */
};
struct ssl_config_data {
@@ -245,10 +253,10 @@ struct ssl_config_data {
char *password; /* TLS password (for, e.g., SRP) */
enum CURL_TLSAUTH authtype; /* TLS authentication type (default SRP) */
#endif
- bit certinfo:1; /* gather lots of certificate info */
- bit falsestart:1;
- bit enable_beast:1; /* allow this flaw for interoperability's sake*/
- bit no_revoke:1; /* disable SSL certificate revocation checks */
+ BIT(certinfo); /* gather lots of certificate info */
+ BIT(falsestart);
+ BIT(enable_beast); /* allow this flaw for interoperability's sake*/
+ BIT(no_revoke); /* disable SSL certificate revocation checks */
};
struct ssl_general_config {
@@ -291,8 +299,8 @@ struct digestdata {
char *qop;
char *algorithm;
int nc; /* nonce count */
- bit stale:1; /* set true for re-negotiation */
- bit userhash:1;
+ BIT(stale); /* set true for re-negotiation */
+ BIT(userhash);
#endif
};
@@ -386,10 +394,10 @@ struct negotiatedata {
size_t output_token_length;
#endif
#endif
- bool noauthpersist;
- bool havenoauthpersist;
- bool havenegdata;
- bool havemultiplerequests;
+ BIT(noauthpersist);
+ BIT(havenoauthpersist);
+ BIT(havenegdata);
+ BIT(havemultiplerequests);
};
#endif
@@ -403,64 +411,64 @@ struct ConnectBits {
is complete */
bool tcpconnect[2]; /* the TCP layer (or similar) is connected, this is set
the first time on the first connect function call */
- bit close:1; /* if set, we close the connection after this request */
- bit reuse:1; /* if set, this is a re-used connection */
- bit altused:1; /* this is an alt-svc "redirect" */
- bit conn_to_host:1; /* if set, this connection has a "connect to host"
- that overrides the host in the URL */
- bit conn_to_port:1; /* if set, this connection has a "connect to port"
- that overrides the port in the URL (remote port) */
- bit proxy:1; /* if set, this transfer is done through a proxy - any type */
- bit httpproxy:1; /* if set, this transfer is done through a http proxy */
- bit socksproxy:1; /* if set, this transfer is done through a socks proxy */
- bit user_passwd:1; /* do we use user+password for this connection? */
- bit proxy_user_passwd:1; /* user+password for the proxy? */
- bit ipv6_ip:1; /* we communicate with a remote site specified with pure IPv6
- IP address */
- bit ipv6:1; /* we communicate with a site using an IPv6 address */
- bit do_more:1; /* this is set TRUE if the ->curl_do_more() function is
- supposed to be called, after ->curl_do() */
- bit protoconnstart:1;/* the protocol layer has STARTED its operation after
- the TCP layer connect */
- bit retry:1; /* this connection is about to get closed and then
- re-attempted at another connection. */
- bit tunnel_proxy:1; /* if CONNECT is used to "tunnel" through the proxy.
- This is implicit when SSL-protocols are used through
- proxies, but can also be enabled explicitly by
- apps */
- bit authneg:1; /* TRUE when the auth phase has started, which means
- that we are creating a request with an auth header,
- but it is not the final request in the auth
- negotiation. */
- bit rewindaftersend:1;/* TRUE when the sending couldn't be stopped even
- though it will be discarded. When the whole send
- operation is done, we must call the data rewind
- callback. */
+ BIT(close); /* if set, we close the connection after this request */
+ BIT(reuse); /* if set, this is a re-used connection */
+ BIT(altused); /* this is an alt-svc "redirect" */
+ BIT(conn_to_host); /* if set, this connection has a "connect to host"
+ that overrides the host in the URL */
+ BIT(conn_to_port); /* if set, this connection has a "connect to port"
+ that overrides the port in the URL (remote port) */
+ BIT(proxy); /* if set, this transfer is done through a proxy - any type */
+ BIT(httpproxy); /* if set, this transfer is done through a http proxy */
+ BIT(socksproxy); /* if set, this transfer is done through a socks proxy */
+ BIT(user_passwd); /* do we use user+password for this connection? */
+ BIT(proxy_user_passwd); /* user+password for the proxy? */
+ BIT(ipv6_ip); /* we communicate with a remote site specified with pure IPv6
+ IP address */
+ BIT(ipv6); /* we communicate with a site using an IPv6 address */
+ BIT(do_more); /* this is set TRUE if the ->curl_do_more() function is
+ supposed to be called, after ->curl_do() */
+ BIT(protoconnstart);/* the protocol layer has STARTED its operation after
+ the TCP layer connect */
+ BIT(retry); /* this connection is about to get closed and then
+ re-attempted at another connection. */
+ BIT(tunnel_proxy); /* if CONNECT is used to "tunnel" through the proxy.
+ This is implicit when SSL-protocols are used through
+ proxies, but can also be enabled explicitly by
+ apps */
+ BIT(authneg); /* TRUE when the auth phase has started, which means
+ that we are creating a request with an auth header,
+ but it is not the final request in the auth
+ negotiation. */
+ BIT(rewindaftersend);/* TRUE when the sending couldn't be stopped even
+ though it will be discarded. When the whole send
+ operation is done, we must call the data rewind
+ callback. */
#ifndef CURL_DISABLE_FTP
- bit ftp_use_epsv:1; /* As set with CURLOPT_FTP_USE_EPSV, but if we find out
- EPSV doesn't work we disable it for the forthcoming
- requests */
- bit ftp_use_eprt:1; /* As set with CURLOPT_FTP_USE_EPRT, but if we find out
- EPRT doesn't work we disable it for the forthcoming
- requests */
- bit ftp_use_data_ssl:1; /* Enabled SSL for the data connection */
+ BIT(ftp_use_epsv); /* As set with CURLOPT_FTP_USE_EPSV, but if we find out
+ EPSV doesn't work we disable it for the forthcoming
+ requests */
+ BIT(ftp_use_eprt); /* As set with CURLOPT_FTP_USE_EPRT, but if we find out
+ EPRT doesn't work we disable it for the forthcoming
+ requests */
+ BIT(ftp_use_data_ssl); /* Enabled SSL for the data connection */
#endif
- bit netrc:1; /* name+password provided by netrc */
- bit userpwd_in_url:1; /* name+password found in url */
- bit stream_was_rewound:1; /* The stream was rewound after a request read
- past the end of its response byte boundary */
- bit proxy_connect_closed:1; /* TRUE if a proxy disconnected the connection
- in a CONNECT request with auth, so that
- libcurl should reconnect and continue. */
- bit bound:1; /* set true if bind() has already been done on this socket/
- connection */
- bit type_set:1; /* type= was used in the URL */
- bit multiplex:1; /* connection is multiplexed */
- bit tcp_fastopen:1; /* use TCP Fast Open */
- bit tls_enable_npn:1; /* TLS NPN extension? */
- bit tls_enable_alpn:1; /* TLS ALPN extension? */
- bit socksproxy_connecting:1; /* connecting through a socks proxy */
- bit connect_only:1;
+ BIT(netrc); /* name+password provided by netrc */
+ BIT(userpwd_in_url); /* name+password found in url */
+ BIT(stream_was_rewound); /* The stream was rewound after a request read
+ past the end of its response byte boundary */
+ BIT(proxy_connect_closed); /* TRUE if a proxy disconnected the connection
+ in a CONNECT request with auth, so that
+ libcurl should reconnect and continue. */
+ BIT(bound); /* set true if bind() has already been done on this socket/
+ connection */
+ BIT(type_set); /* type= was used in the URL */
+ BIT(multiplex); /* connection is multiplexed */
+ BIT(tcp_fastopen); /* use TCP Fast Open */
+ BIT(tls_enable_npn); /* TLS NPN extension? */
+ BIT(tls_enable_alpn); /* TLS ALPN extension? */
+ BIT(socksproxy_connecting); /* connecting through a socks proxy */
+ BIT(connect_only);
};
struct hostname {
@@ -493,7 +501,7 @@ struct Curl_async {
struct Curl_dns_entry *dns;
int status; /* if done is TRUE, this is the status from the callback */
void *os_specific; /* 'struct thread_data' for Windows */
- bit done:1; /* set TRUE when the lookup is complete */
+ BIT(done); /* set TRUE when the lookup is complete */
};
#define FIRSTSOCKET 0
@@ -614,20 +622,20 @@ struct SingleRequest {
#ifndef CURL_DISABLE_DOH
struct dohdata doh; /* DoH specific data for this request */
#endif
- bit header:1; /* incoming data has HTTP header */
- bit content_range:1; /* set TRUE if Content-Range: was found */
- bit upload_done:1; /* set to TRUE when doing chunked transfer-encoding
- upload and we're uploading the last chunk */
- bit ignorebody:1; /* we read a response-body but we ignore it! */
- bit http_bodyless:1; /* HTTP response status code is between 100 and 199,
- 204 or 304 */
- bit chunk:1; /* if set, this is a chunked transfer-encoding */
- bit upload_chunky:1; /* set TRUE if we are doing chunked transfer-encoding
- on upload */
- bit getheader:1; /* TRUE if header parsing is wanted */
- bit forbidchunk:1; /* used only to explicitly forbid chunk-upload for
- specific upload buffers. See readmoredata() in http.c
- for details. */
+ BIT(header); /* incoming data has HTTP header */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
+ upload and we're uploading the last chunk */
+ BIT(ignorebody); /* we read a response-body but we ignore it! */
+ BIT(http_bodyless); /* HTTP response status code is between 100 and 199,
+ 204 or 304 */
+ BIT(chunk); /* if set, this is a chunked transfer-encoding */
+ BIT(upload_chunky); /* set TRUE if we are doing chunked transfer-encoding
+ on upload */
+ BIT(getheader); /* TRUE if header parsing is wanted */
+ BIT(forbidchunk); /* used only to explicitly forbid chunk-upload for
+ specific upload buffers. See readmoredata() in http.c
+ for details. */
};
/*
@@ -776,8 +784,8 @@ struct http_connect_state {
TUNNEL_CONNECT, /* CONNECT has been sent off */
TUNNEL_COMPLETE /* CONNECT response received completely */
} tunnel_state;
- bit chunked_encoding:1;
- bit close_connection:1;
+ BIT(chunked_encoding);
+ BIT(close_connection);
};
struct ldapconninfo;
@@ -952,7 +960,7 @@ struct connectdata {
} allocptr;
#ifdef HAVE_GSSAPI
- bit sec_complete:1; /* if Kerberos is enabled for this connection */
+ BIT(sec_complete); /* if Kerberos is enabled for this connection */
enum protection_level command_prot;
enum protection_level data_prot;
enum protection_level request_data_prot;
@@ -1045,16 +1053,16 @@ struct connectdata {
#ifdef USE_UNIX_SOCKETS
char *unix_domain_socket;
- bit abstract_unix_socket:1;
+ BIT(abstract_unix_socket);
#endif
- bit tls_upgraded:1;
+ BIT(tls_upgraded);
/* the two following *_inuse fields are only flags, not counters in any way.
If TRUE it means the channel is in use, and if FALSE it means the channel
is up for grabs by one. */
- bit readchannel_inuse:1; /* whether the read channel is in use by an easy
- handle */
- bit writechannel_inuse:1; /* whether the write channel is in use by an easy
- handle */
+ BIT(readchannel_inuse); /* whether the read channel is in use by an easy
+ handle */
+ BIT(writechannel_inuse); /* whether the write channel is in use by an easy
+ handle */
};
/* The end of connectdata. */
@@ -1096,8 +1104,8 @@ struct PureInfo {
OpenSSL, GnuTLS, Schannel, NSS and GSKit
builds. Asked for with CURLOPT_CERTINFO
/ CURLINFO_CERTINFO */
- bit timecond:1; /* set to TRUE if the time condition didn't match, which
- thus made the document NOT get fetched */
+ BIT(timecond); /* set to TRUE if the time condition didn't match, which
+ thus made the document NOT get fetched */
};
@@ -1144,8 +1152,8 @@ struct Progress {
curl_off_t speeder[ CURR_TIME ];
struct curltime speeder_time[ CURR_TIME ];
int speeder_c;
- bit callback:1; /* set when progress callback is used */
- bit is_t_startransfer_set:1;
+ BIT(callback); /* set when progress callback is used */
+ BIT(is_t_startransfer_set);
};
typedef enum {
@@ -1193,12 +1201,12 @@ struct auth {
unsigned long picked;
unsigned long avail; /* Bitmask for what the server reports to support for
this resource */
- bit done:1; /* TRUE when the auth phase is done and ready to do the
- *actual* request */
- bit multipass:1; /* TRUE if this is not yet authenticated but within the
- auth multipass negotiation */
- bit iestyle:1; /* TRUE if digest should be done IE-style or FALSE if it
- should be RFC compliant */
+ BIT(done); /* TRUE when the auth phase is done and ready to do the
+ actual request */
+ BIT(multipass); /* TRUE if this is not yet authenticated but within the
+ auth multipass negotiation */
+ BIT(iestyle); /* TRUE if digest should be done IE-style or FALSE if it
+ should be RFC compliant */
};
struct Curl_http2_dep {
@@ -1328,7 +1336,7 @@ struct UrlState {
/* do FTP line-end conversions on most platforms */
#define CURL_DO_LINEEND_CONV
/* for FTP downloads: track CRLF sequences that span blocks */
- bit prev_block_had_trailing_cr:1;
+ BIT(prev_block_had_trailing_cr);
/* for FTP downloads: how many CRLFs did we convert to LFs? */
curl_off_t crlf_conversions;
#endif
@@ -1363,32 +1371,33 @@ struct UrlState {
trailers_state trailers_state; /* whether we are sending trailers
and what stage are we at */
#ifdef CURLDEBUG
- bit conncache_lock:1;
+ BIT(conncache_lock);
#endif
/* when curl_easy_perform() is called, the multi handle is "owned" by
the easy handle so curl_easy_cleanup() on such an easy handle will
also close the multi handle! */
- bit multi_owned_by_easy:1;
+ BIT(multi_owned_by_easy);
- bit this_is_a_follow:1; /* this is a followed Location: request */
- bit refused_stream:1; /* this was refused, try again */
- bit errorbuf:1; /* Set to TRUE if the error buffer is already filled in.
+ BIT(this_is_a_follow); /* this is a followed Location: request */
+ BIT(refused_stream); /* this was refused, try again */
+ BIT(errorbuf); /* Set to TRUE if the error buffer is already filled in.
This must be set to FALSE every time _easy_perform() is
called. */
- bit allow_port:1; /* Is set.use_port allowed to take effect or not. This
+ BIT(allow_port); /* Is set.use_port allowed to take effect or not. This
is always set TRUE when curl_easy_perform() is called. */
- bit authproblem:1; /* TRUE if there's some problem authenticating */
+ BIT(authproblem); /* TRUE if there's some problem authenticating */
/* set after initial USER failure, to prevent an authentication loop */
- bit ftp_trying_alternative:1;
- bit wildcardmatch:1; /* enable wildcard matching */
- bit expect100header:1; /* TRUE if we added Expect: 100-continue */
- bit use_range:1;
- bit rangestringalloc:1; /* the range string is malloc()'ed */
- bit done:1; /* set to FALSE when Curl_init_do() is called and set to TRUE
+ BIT(ftp_trying_alternative);
+ BIT(wildcardmatch); /* enable wildcard matching */
+ BIT(expect100header); /* TRUE if we added Expect: 100-continue */
+ BIT(use_range);
+ BIT(rangestringalloc); /* the range string is malloc()'ed */
+ BIT(done); /* set to FALSE when Curl_init_do() is called and set to TRUE
when multi_done() is called, to prevent multi_done() to get
invoked twice when the multi interface is used. */
- bit stream_depends_e:1; /* set or don't set the Exclusive bit */
- bit previouslypending:1; /* this transfer WAS in the multi->pending queue */
+ BIT(stream_depends_e); /* set or don't set the Exclusive bit */
+ BIT(previouslypending); /* this transfer WAS in the multi->pending queue */
+ BIT(cookie_engine);
};
@@ -1406,9 +1415,9 @@ struct DynamicStatic {
curl_easy_setopt(COOKIEFILE) calls */
struct curl_slist *resolve; /* set to point to the set.resolve list when
this should be dealt with in pretransfer */
- bit url_alloc:1; /* URL string is malloc()'ed */
- bit referer_alloc:1; /* referer string is malloc()ed */
- bit wildcard_resolve:1; /* Set to true if any resolve change is a
+ BIT(url_alloc); /* URL string is malloc()'ed */
+ BIT(referer_alloc); /* referer string is malloc()ed */
+ BIT(wildcard_resolve); /* Set to true if any resolve change is a
wildcard */
};
@@ -1688,84 +1697,82 @@ struct UserDefined {
CURLU *uh; /* URL handle for the current parsed URL */
void *trailer_data; /* pointer to pass to trailer data callback */
curl_trailer_callback trailer_callback; /* trailing data callback */
- bit is_fread_set:1; /* has read callback been set to non-NULL? */
- bit is_fwrite_set:1; /* has write callback been set to non-NULL? */
- bit free_referer:1; /* set TRUE if 'referer' points to a string we
+ BIT(is_fread_set); /* has read callback been set to non-NULL? */
+ BIT(is_fwrite_set); /* has write callback been set to non-NULL? */
+ BIT(free_referer); /* set TRUE if 'referer' points to a string we
allocated */
- bit tftp_no_options:1; /* do not send TFTP options requests */
- bit sep_headers:1; /* handle host and proxy headers separately */
- bit cookiesession:1; /* new cookie session? */
- bit crlf:1; /* convert crlf on ftp upload(?) */
- bit strip_path_slash:1; /* strip off initial slash from path */
- bit ssh_compression:1; /* enable SSH compression */
+ BIT(tftp_no_options); /* do not send TFTP options requests */
+ BIT(sep_headers); /* handle host and proxy headers separately */
+ BIT(cookiesession); /* new cookie session? */
+ BIT(crlf); /* convert crlf on ftp upload(?) */
+ BIT(strip_path_slash); /* strip off initial slash from path */
+ BIT(ssh_compression); /* enable SSH compression */
/* Here follows boolean settings that define how to behave during
this session. They are STATIC, set by libcurl users or at least initially
and they don't change during operations. */
- bit get_filetime:1; /* get the time and get of the remote file */
- bit tunnel_thru_httpproxy:1; /* use CONNECT through a HTTP proxy */
- bit prefer_ascii:1; /* ASCII rather than binary */
- bit ftp_append:1; /* append, not overwrite, on upload */
- bit ftp_list_only:1; /* switch FTP command for listing directories */
+ BIT(get_filetime); /* get the modification time of the remote file */
+ BIT(tunnel_thru_httpproxy); /* use CONNECT through a HTTP proxy */
+ BIT(prefer_ascii); /* ASCII rather than binary */
+ BIT(ftp_append); /* append, not overwrite, on upload */
+ BIT(ftp_list_only); /* switch FTP command for listing directories */
#ifndef CURL_DISABLE_FTP
- bit ftp_use_port:1; /* use the FTP PORT command */
- bit ftp_use_epsv:1; /* if EPSV is to be attempted or not */
- bit ftp_use_eprt:1; /* if EPRT is to be attempted or not */
- bit ftp_use_pret:1; /* if PRET is to be used before PASV or not */
- bit ftp_skip_ip:1; /* skip the IP address the FTP server passes on to
+ BIT(ftp_use_port); /* use the FTP PORT command */
+ BIT(ftp_use_epsv); /* if EPSV is to be attempted or not */
+ BIT(ftp_use_eprt); /* if EPRT is to be attempted or not */
+ BIT(ftp_use_pret); /* if PRET is to be used before PASV or not */
+ BIT(ftp_skip_ip); /* skip the IP address the FTP server passes on to
us */
#endif
- bit hide_progress:1; /* don't use the progress meter */
- bit http_fail_on_error:1; /* fail on HTTP error codes >= 400 */
- bit http_keep_sending_on_error:1; /* for HTTP status codes >= 300 */
- bit http_follow_location:1; /* follow HTTP redirects */
- bit http_transfer_encoding:1; /* request compressed HTTP
- transfer-encoding */
- bit allow_auth_to_other_hosts:1;
- bit include_header:1; /* include received protocol headers in data output */
- bit http_set_referer:1; /* is a custom referer used */
- bit http_auto_referer:1; /* set "correct" referer when following
- location: */
- bit opt_no_body:1; /* as set with CURLOPT_NOBODY */
- bit upload:1; /* upload request */
- bit verbose:1; /* output verbosity */
- bit krb:1; /* Kerberos connection requested */
- bit reuse_forbid:1; /* forbidden to be reused, close after use */
- bit reuse_fresh:1; /* do not re-use an existing connection */
-
- bit no_signal:1; /* do not use any signal/alarm handler */
- bit tcp_nodelay:1; /* whether to enable TCP_NODELAY or not */
- bit ignorecl:1; /* ignore content length */
- bit connect_only:1; /* make connection, let application use the socket */
- bit http_te_skip:1; /* pass the raw body data to the user, even when
- transfer-encoded (chunked, compressed) */
- bit http_ce_skip:1; /* pass the raw body data to the user, even when
- content-encoded (chunked, compressed) */
- bit proxy_transfer_mode:1; /* set transfer mode (;type=<a|i>) when doing
- FTP via an HTTP proxy */
+ BIT(hide_progress); /* don't use the progress meter */
+ BIT(http_fail_on_error); /* fail on HTTP error codes >= 400 */
+ BIT(http_keep_sending_on_error); /* for HTTP status codes >= 300 */
+ BIT(http_follow_location); /* follow HTTP redirects */
+ BIT(http_transfer_encoding); /* request compressed HTTP transfer-encoding */
+ BIT(allow_auth_to_other_hosts);
+ BIT(include_header); /* include received protocol headers in data output */
+ BIT(http_set_referer); /* is a custom referer used */
+ BIT(http_auto_referer); /* set "correct" referer when following
+ location: */
+ BIT(opt_no_body); /* as set with CURLOPT_NOBODY */
+ BIT(upload); /* upload request */
+ BIT(verbose); /* output verbosity */
+ BIT(krb); /* Kerberos connection requested */
+ BIT(reuse_forbid); /* forbidden to be reused, close after use */
+ BIT(reuse_fresh); /* do not re-use an existing connection */
+ BIT(no_signal); /* do not use any signal/alarm handler */
+ BIT(tcp_nodelay); /* whether to enable TCP_NODELAY or not */
+ BIT(ignorecl); /* ignore content length */
+ BIT(connect_only); /* make connection, let application use the socket */
+ BIT(http_te_skip); /* pass the raw body data to the user, even when
+ transfer-encoded (chunked, compressed) */
+ BIT(http_ce_skip); /* pass the raw body data to the user, even when
+ content-encoded (chunked, compressed) */
+ BIT(proxy_transfer_mode); /* set transfer mode (;type=<a|i>) when doing
+ FTP via an HTTP proxy */
#if defined(HAVE_GSSAPI) || defined(USE_WINDOWS_SSPI)
- bit socks5_gssapi_nec:1; /* Flag to support NEC SOCKS5 server */
+ BIT(socks5_gssapi_nec); /* Flag to support NEC SOCKS5 server */
#endif
- bit sasl_ir:1; /* Enable/disable SASL initial response */
- bit wildcard_enabled:1; /* enable wildcard matching */
- bit tcp_keepalive:1; /* use TCP keepalives */
- bit tcp_fastopen:1; /* use TCP Fast Open */
- bit ssl_enable_npn:1; /* TLS NPN extension? */
- bit ssl_enable_alpn:1;/* TLS ALPN extension? */
- bit path_as_is:1; /* allow dotdots? */
- bit pipewait:1; /* wait for multiplex status before starting a new
- connection */
- bit suppress_connect_headers:1; /* suppress proxy CONNECT response headers
- from user callbacks */
- bit dns_shuffle_addresses:1; /* whether to shuffle addresses before use */
- bit stream_depends_e:1; /* set or don't set the Exclusive bit */
- bit haproxyprotocol:1; /* whether to send HAProxy PROXY protocol v1
- header */
- bit abstract_unix_socket:1;
- bit disallow_username_in_url:1; /* disallow username in url */
- bit doh:1; /* DNS-over-HTTPS enabled */
- bit doh_get:1; /* use GET for DoH requests, instead of POST */
- bit http09_allowed:1; /* allow HTTP/0.9 responses */
+ BIT(sasl_ir); /* Enable/disable SASL initial response */
+ BIT(wildcard_enabled); /* enable wildcard matching */
+ BIT(tcp_keepalive); /* use TCP keepalives */
+ BIT(tcp_fastopen); /* use TCP Fast Open */
+ BIT(ssl_enable_npn); /* TLS NPN extension? */
+ BIT(ssl_enable_alpn);/* TLS ALPN extension? */
+ BIT(path_as_is); /* allow dotdots? */
+ BIT(pipewait); /* wait for multiplex status before starting a new
+ connection */
+ BIT(suppress_connect_headers); /* suppress proxy CONNECT response headers
+ from user callbacks */
+ BIT(dns_shuffle_addresses); /* whether to shuffle addresses before use */
+ BIT(stream_depends_e); /* set or don't set the Exclusive bit */
+ BIT(haproxyprotocol); /* whether to send HAProxy PROXY protocol v1
+ header */
+ BIT(abstract_unix_socket);
+ BIT(disallow_username_in_url); /* disallow username in url */
+ BIT(doh); /* DNS-over-HTTPS enabled */
+ BIT(doh_get); /* use GET for DoH requests, instead of POST */
+ BIT(http09_allowed); /* allow HTTP/0.9 responses */
};
struct Names {
diff --git a/libs/libcurl/src/vauth/vauth.h b/libs/libcurl/src/vauth/vauth.h
index 73bd25ed5e..a1a557d2a1 100644
--- a/libs/libcurl/src/vauth/vauth.h
+++ b/libs/libcurl/src/vauth/vauth.h
@@ -43,7 +43,7 @@ struct negotiatedata;
#endif
#if defined(USE_WINDOWS_SSPI)
-#define GSS_ERROR(status) (status & 0x80000000)
+#define GSS_ERROR(status) ((status) & 0x80000000)
#endif
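
The extra parentheses matter whenever the macro argument is an expression rather than a plain variable. A small standalone illustration (not curl code) of the precedence trap the hunk closes:

#include <stdio.h>

#define GSS_ERROR_OLD(status) (status & 0x80000000)
#define GSS_ERROR_NEW(status) ((status) & 0x80000000)

int main(void)
{
  unsigned long warn = 0x7fffffffUL;  /* high bit clear: not an error */
  int pick_warn = 1;

  /* the old form expands to (pick_warn ? warn : 0UL & 0x80000000):
     '&' binds tighter than '?:', so the mask never touches 'warn' */
  printf("old: %lx\n", GSS_ERROR_OLD(pick_warn ? warn : 0UL)); /* 7fffffff */
  printf("new: %lx\n", GSS_ERROR_NEW(pick_warn ? warn : 0UL)); /* 0 */
  return 0;
}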
/* This is used to build a SPN string */
diff --git a/libs/libcurl/src/version.c b/libs/libcurl/src/version.c
index ae2b09d310..cfd09e36d7 100644
--- a/libs/libcurl/src/version.c
+++ b/libs/libcurl/src/version.c
@@ -104,14 +104,12 @@ char *curl_version(void)
left -= len;
ptr += len;
- if(left > 1) {
- len = Curl_ssl_version(ptr + 1, left - 1);
+ len = Curl_ssl_version(ptr + 1, left - 1);
- if(len > 0) {
- *ptr = ' ';
- left -= ++len;
- ptr += len;
- }
+ if(len > 0) {
+ *ptr = ' ';
+ left -= ++len;
+ ptr += len;
}
#ifdef HAVE_LIBZ
@@ -369,6 +367,9 @@ static curl_version_info_data version_info = {
#if defined(USE_ALTSVC)
| CURL_VERSION_ALTSVC
#endif
+#ifdef USE_ESNI
+ | CURL_VERSION_ESNI
+#endif
,
NULL, /* ssl_version */
0, /* ssl_version_num, this is kept at zero */
diff --git a/libs/libcurl/src/vquic/ngtcp2.c b/libs/libcurl/src/vquic/ngtcp2.c
new file mode 100644
index 0000000000..c0f9b16e38
--- /dev/null
+++ b/libs/libcurl/src/vquic/ngtcp2.c
@@ -0,0 +1,1593 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifdef USE_NGTCP2
+#include <ngtcp2/ngtcp2.h>
+#include <ngtcp2/ngtcp2_crypto.h>
+#include <nghttp3/nghttp3.h>
+#include <openssl/err.h>
+#include "urldata.h"
+#include "sendf.h"
+#include "strdup.h"
+#include "rand.h"
+#include "ngtcp2.h"
+#include "multiif.h"
+#include "strcase.h"
+#include "connect.h"
+#include "strerror.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+/* #define DEBUG_NGTCP2 */
+#ifdef CURLDEBUG
+#define DEBUG_HTTP3
+#endif
+#ifdef DEBUG_HTTP3
+#define H3BUGF(x) x
+#else
+#define H3BUGF(x) do { } WHILE_FALSE
+#endif
+
+/*
+ * This holds outgoing HTTP/3 stream data that is used by nghttp3 until acked.
+ * It is used as a circular buffer. Add new bytes at the end until it reaches
+ * the far end, then start over at index 0 again.
+ */
+
+#define H3_SEND_SIZE (20*1024)
+struct h3out {
+ uint8_t buf[H3_SEND_SIZE];
+ size_t used; /* number of bytes used in the buffer */
+ size_t windex; /* index in the buffer where to start writing the next
+ data block */
+};
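
A compact sketch (simplified, no error handling) of the wrap-around bookkeeping the comment above describes; the real logic lives in cb_h3_readfunction and cb_h3_acked_stream_data further down:

/* append up to 'len' bytes into the ring; returns how many were stored */
static size_t h3out_append(struct h3out *out, const uint8_t *src, size_t len)
{
  size_t chunk = H3_SEND_SIZE - out->used;     /* total free space */
  if(chunk > len)
    chunk = len;
  if(chunk > H3_SEND_SIZE - out->windex)       /* only a consecutive block */
    chunk = H3_SEND_SIZE - out->windex;
  memcpy(&out->buf[out->windex], src, chunk);
  out->windex += chunk;
  if(out->windex == H3_SEND_SIZE)
    out->windex = 0;                           /* wrap */
  out->used += chunk;                          /* shrinks again when acked */
  return chunk;
}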
+
+#define QUIC_MAX_STREAMS (256*1024)
+#define QUIC_MAX_DATA (1*1024*1024)
+#define QUIC_IDLE_TIMEOUT 60000 /* milliseconds */
+#define QUIC_CIPHERS \
+ "TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_" \
+ "POLY1305_SHA256:TLS_AES_128_CCM_SHA256"
+#define QUIC_GROUPS "P-256:X25519:P-384:P-521"
+
+static CURLcode ng_process_ingress(struct connectdata *conn,
+ curl_socket_t sockfd,
+ struct quicsocket *qs);
+static CURLcode ng_flush_egress(struct connectdata *conn, int sockfd,
+ struct quicsocket *qs);
+static int cb_h3_acked_stream_data(nghttp3_conn *conn, int64_t stream_id,
+ size_t datalen, void *user_data,
+ void *stream_user_data);
+
+static ngtcp2_tstamp timestamp(void)
+{
+ struct curltime ct = Curl_now();
+ return ct.tv_sec * NGTCP2_SECONDS + ct.tv_usec * NGTCP2_MICROSECONDS;
+}
+
+#ifdef DEBUG_NGTCP2
+static void quic_printf(void *user_data, const char *fmt, ...)
+{
+ va_list ap;
+ (void)user_data; /* TODO, use this to do infof() instead long-term */
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+#endif
+
+static ngtcp2_crypto_level
+quic_from_ossl_level(OSSL_ENCRYPTION_LEVEL ossl_level)
+{
+ switch(ossl_level) {
+ case ssl_encryption_initial:
+ return NGTCP2_CRYPTO_LEVEL_INITIAL;
+ case ssl_encryption_early_data:
+ return NGTCP2_CRYPTO_LEVEL_EARLY;
+ case ssl_encryption_handshake:
+ return NGTCP2_CRYPTO_LEVEL_HANDSHAKE;
+ case ssl_encryption_application:
+ return NGTCP2_CRYPTO_LEVEL_APP;
+ default:
+ assert(0);
+ }
+}
+
+static int setup_initial_crypto_context(struct quicsocket *qs)
+{
+ const ngtcp2_cid *dcid = ngtcp2_conn_get_dcid(qs->qconn);
+
+ if(ngtcp2_crypto_derive_and_install_initial_key(
+ qs->qconn, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, dcid,
+ NGTCP2_CRYPTO_SIDE_CLIENT) != 0)
+ return -1;
+
+ return 0;
+}
+
+static void quic_settings(ngtcp2_settings *s,
+ uint64_t stream_buffer_size)
+{
+ ngtcp2_settings_default(s);
+#ifdef DEBUG_NGTCP2
+ s->log_printf = quic_printf;
+#else
+ s->log_printf = NULL;
+#endif
+ s->initial_ts = timestamp();
+ s->transport_params.initial_max_stream_data_bidi_local = stream_buffer_size;
+ s->transport_params.initial_max_stream_data_bidi_remote = QUIC_MAX_STREAMS;
+ s->transport_params.initial_max_stream_data_uni = QUIC_MAX_STREAMS;
+ s->transport_params.initial_max_data = QUIC_MAX_DATA;
+ s->transport_params.initial_max_streams_bidi = 1;
+ s->transport_params.initial_max_streams_uni = 3;
+ s->transport_params.idle_timeout = QUIC_IDLE_TIMEOUT;
+}
+
+static FILE *keylog_file; /* not thread-safe */
+static void keylog_callback(const SSL *ssl, const char *line)
+{
+ (void)ssl;
+ fputs(line, keylog_file);
+ fputc('\n', keylog_file);
+ fflush(keylog_file);
+}
+
+static int init_ngh3_conn(struct quicsocket *qs);
+
+static int quic_set_encryption_secrets(SSL *ssl,
+ OSSL_ENCRYPTION_LEVEL ossl_level,
+ const uint8_t *rx_secret,
+ const uint8_t *tx_secret,
+ size_t secretlen)
+{
+ struct quicsocket *qs = (struct quicsocket *)SSL_get_app_data(ssl);
+ int level = quic_from_ossl_level(ossl_level);
+
+ if(ngtcp2_crypto_derive_and_install_key(
+ qs->qconn, ssl, NULL, NULL, NULL, NULL, NULL, NULL, level, rx_secret,
+ tx_secret, secretlen, NGTCP2_CRYPTO_SIDE_CLIENT) != 0)
+ return 0;
+
+ if(level == NGTCP2_CRYPTO_LEVEL_APP && init_ngh3_conn(qs) != CURLE_OK)
+ return 0;
+
+ return 1;
+}
+
+static int quic_add_handshake_data(SSL *ssl, OSSL_ENCRYPTION_LEVEL ossl_level,
+ const uint8_t *data, size_t len)
+{
+ struct quicsocket *qs = (struct quicsocket *)SSL_get_app_data(ssl);
+ struct quic_handshake *crypto_data;
+ ngtcp2_crypto_level level = quic_from_ossl_level(ossl_level);
+ int rv;
+
+ crypto_data = &qs->client_crypto_data[level];
+ if(crypto_data->buf == NULL) {
+ crypto_data->buf = malloc(4096);
+ crypto_data->alloclen = 4096;
+ /* TODO Explode if malloc failed */
+ }
+
+ /* TODO Just pretend that handshake does not grow more than 4KiB for
+ now */
+ assert(crypto_data->len + len <= crypto_data->alloclen);
+
+ memcpy(&crypto_data->buf[crypto_data->len], data, len);
+ crypto_data->len += len;
+
+ rv = ngtcp2_conn_submit_crypto_data(
+ qs->qconn, level, (uint8_t *)(&crypto_data->buf[crypto_data->len] - len),
+ len);
+ if(rv) {
+ H3BUGF(fprintf(stderr, "write_client_handshake failed\n"));
+ }
+ assert(0 == rv);
+
+ return 1;
+}
+
+static int quic_flush_flight(SSL *ssl)
+{
+ (void)ssl;
+ return 1;
+}
+
+static int quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level,
+ uint8_t alert)
+{
+ struct quicsocket *qs = (struct quicsocket *)SSL_get_app_data(ssl);
+ (void)level;
+
+ qs->tls_alert = alert;
+ return 1;
+}
+
+static SSL_QUIC_METHOD quic_method = {quic_set_encryption_secrets,
+ quic_add_handshake_data,
+ quic_flush_flight, quic_send_alert};
+
+static SSL_CTX *quic_ssl_ctx(struct Curl_easy *data)
+{
+ SSL_CTX *ssl_ctx = SSL_CTX_new(TLS_method());
+ const char *keylog_filename;
+
+ SSL_CTX_set_min_proto_version(ssl_ctx, TLS1_3_VERSION);
+ SSL_CTX_set_max_proto_version(ssl_ctx, TLS1_3_VERSION);
+
+ SSL_CTX_set_default_verify_paths(ssl_ctx);
+
+ if(SSL_CTX_set_ciphersuites(ssl_ctx, QUIC_CIPHERS) != 1) {
+ failf(data, "SSL_CTX_set_ciphersuites: %s",
+ ERR_error_string(ERR_get_error(), NULL));
+ return NULL;
+ }
+
+ if(SSL_CTX_set1_groups_list(ssl_ctx, QUIC_GROUPS) != 1) {
+ failf(data, "SSL_CTX_set1_groups_list failed");
+ return NULL;
+ }
+
+ SSL_CTX_set_quic_method(ssl_ctx, &quic_method);
+
+ keylog_filename = getenv("SSLKEYLOGFILE");
+ if(keylog_filename) {
+ keylog_file = fopen(keylog_filename, "wb");
+ if(keylog_file) {
+ SSL_CTX_set_keylog_callback(ssl_ctx, keylog_callback);
+ }
+ }
+
+ return ssl_ctx;
+}
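
The keylog hook above only fires when SSLKEYLOGFILE is set in the environment. A hypothetical way an application could enable it before any transfer (the file path is just an example; on Windows _putenv_s would be the rough equivalent of setenv):

#include <stdlib.h>
#include <curl/curl.h>

int main(void)
{
  /* dump QUIC/TLS session secrets so e.g. Wireshark can decrypt a capture */
  setenv("SSLKEYLOGFILE", "/tmp/quic-keys.log", 1);

  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *easy = curl_easy_init();
  /* ... set options and perform HTTP/3 transfers as usual ... */
  curl_easy_cleanup(easy);
  curl_global_cleanup();
  return 0;
}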
+
+/** SSL callbacks ***/
+
+static int quic_init_ssl(struct quicsocket *qs)
+{
+ const uint8_t *alpn = NULL;
+ size_t alpnlen = 0;
+ /* this will need some attention when HTTPS proxy over QUIC get fixed */
+ const char * const hostname = qs->conn->host.name;
+
+ if(qs->ssl)
+ SSL_free(qs->ssl);
+
+ qs->ssl = SSL_new(qs->sslctx);
+
+ SSL_set_app_data(qs->ssl, qs);
+ SSL_set_connect_state(qs->ssl);
+
+ switch(qs->version) {
+#ifdef NGTCP2_PROTO_VER
+ case NGTCP2_PROTO_VER:
+ alpn = (const uint8_t *)NGTCP2_ALPN_H3;
+ alpnlen = sizeof(NGTCP2_ALPN_H3) - 1;
+ break;
+#endif
+ }
+ if(alpn)
+ SSL_set_alpn_protos(qs->ssl, alpn, (int)alpnlen);
+
+ /* set SNI */
+ SSL_set_tlsext_host_name(qs->ssl, hostname);
+ return 0;
+}
+
+static int cb_initial(ngtcp2_conn *quic, void *user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+
+ if(ngtcp2_crypto_read_write_crypto_data(
+ quic, qs->ssl, NGTCP2_CRYPTO_LEVEL_INITIAL, NULL, 0) != 0)
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+
+ return 0;
+}
+
+static int
+cb_recv_crypto_data(ngtcp2_conn *tconn, ngtcp2_crypto_level crypto_level,
+ uint64_t offset,
+ const uint8_t *data, size_t datalen,
+ void *user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ (void)offset;
+
+ if(ngtcp2_crypto_read_write_crypto_data(tconn, qs->ssl, crypto_level, data,
+ datalen) != 0)
+ return NGTCP2_ERR_CRYPTO;
+
+ return 0;
+}
+
+static int cb_handshake_completed(ngtcp2_conn *tconn, void *user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ (void)tconn;
+ infof(qs->conn->data, "QUIC handshake is completed\n");
+
+ return 0;
+}
+
+static int cb_recv_stream_data(ngtcp2_conn *tconn, int64_t stream_id,
+ int fin, uint64_t offset,
+ const uint8_t *buf, size_t buflen,
+ void *user_data, void *stream_user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ ssize_t nconsumed;
+ (void)offset;
+ (void)stream_user_data;
+
+ infof(qs->conn->data, "Received %ld bytes data on stream %u\n",
+ buflen, stream_id);
+
+ nconsumed =
+ nghttp3_conn_read_stream(qs->h3conn, stream_id, buf, buflen, fin);
+ if(nconsumed < 0) {
+ failf(qs->conn->data, "nghttp3_conn_read_stream returned error: %s\n",
+ nghttp3_strerror((int)nconsumed));
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+
+ ngtcp2_conn_extend_max_stream_offset(tconn, stream_id, nconsumed);
+ ngtcp2_conn_extend_max_offset(tconn, nconsumed);
+
+ return 0;
+}
+
+static int
+cb_acked_stream_data_offset(ngtcp2_conn *tconn, int64_t stream_id,
+ uint64_t offset, size_t datalen, void *user_data,
+ void *stream_user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ int rv;
+ (void)stream_id;
+ (void)tconn;
+ (void)offset;
+ (void)datalen;
+ (void)stream_user_data;
+
+ rv = nghttp3_conn_add_ack_offset(qs->h3conn, stream_id, datalen);
+ if(rv != 0) {
+ failf(qs->conn->data, "nghttp3_conn_add_ack_offset returned error: %s\n",
+ nghttp3_strerror(rv));
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+
+ return 0;
+}
+
+static int cb_stream_close(ngtcp2_conn *tconn, int64_t stream_id,
+ uint64_t app_error_code,
+ void *user_data, void *stream_user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ int rv;
+ (void)tconn;
+ (void)stream_user_data;
+ /* stream is closed... */
+
+ rv = nghttp3_conn_close_stream(qs->h3conn, stream_id,
+ app_error_code);
+ if(rv != 0) {
+ failf(qs->conn->data, "nghttp3_conn_close_stream returned error: %s\n",
+ nghttp3_strerror(rv));
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+
+ return 0;
+}
+
+static int cb_stream_reset(ngtcp2_conn *tconn, int64_t stream_id,
+ uint64_t final_size, uint64_t app_error_code,
+ void *user_data, void *stream_user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ int rv;
+ (void)tconn;
+ (void)final_size;
+ (void)app_error_code;
+ (void)stream_user_data;
+
+ rv = nghttp3_conn_reset_stream(qs->h3conn, stream_id);
+ if(rv != 0) {
+ failf(qs->conn->data, "nghttp3_conn_reset_stream returned error: %s\n",
+ nghttp3_strerror(rv));
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+
+ return 0;
+}
+
+static int cb_recv_retry(ngtcp2_conn *tconn, const ngtcp2_pkt_hd *hd,
+ const ngtcp2_pkt_retry *retry, void *user_data)
+{
+ /* Re-generate handshake secrets here because connection ID might change. */
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ (void)tconn;
+ (void)hd;
+ (void)retry;
+
+ setup_initial_crypto_context(qs);
+
+ return 0;
+}
+
+static int cb_extend_max_local_streams_bidi(ngtcp2_conn *tconn,
+ uint64_t max_streams,
+ void *user_data)
+{
+ (void)tconn;
+ (void)max_streams;
+ (void)user_data;
+
+ return 0;
+}
+
+static int cb_extend_max_stream_data(ngtcp2_conn *tconn, int64_t stream_id,
+ uint64_t max_data, void *user_data,
+ void *stream_user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ int rv;
+ (void)tconn;
+ (void)max_data;
+ (void)stream_user_data;
+
+ rv = nghttp3_conn_unblock_stream(qs->h3conn, stream_id);
+ if(rv != 0) {
+ failf(qs->conn->data, "nghttp3_conn_unblock_stream returned error: %s\n",
+ nghttp3_strerror(rv));
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ }
+
+ return 0;
+}
+
+static int cb_get_new_connection_id(ngtcp2_conn *tconn, ngtcp2_cid *cid,
+ uint8_t *token, size_t cidlen,
+ void *user_data)
+{
+ struct quicsocket *qs = (struct quicsocket *)user_data;
+ CURLcode result;
+ (void)tconn;
+
+ result = Curl_rand(qs->conn->data, cid->data, cidlen);
+ if(result)
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+ cid->datalen = cidlen;
+
+ result = Curl_rand(qs->conn->data, token, NGTCP2_STATELESS_RESET_TOKENLEN);
+ if(result)
+ return NGTCP2_ERR_CALLBACK_FAILURE;
+
+ return 0;
+}
+
+static ngtcp2_conn_callbacks ng_callbacks = {
+ cb_initial,
+ NULL, /* recv_client_initial */
+ cb_recv_crypto_data,
+ cb_handshake_completed,
+ NULL, /* recv_version_negotiation */
+ ngtcp2_crypto_encrypt_cb,
+ ngtcp2_crypto_decrypt_cb,
+ ngtcp2_crypto_hp_mask_cb,
+ cb_recv_stream_data,
+ NULL, /* acked_crypto_offset */
+ cb_acked_stream_data_offset,
+ NULL, /* stream_open */
+ cb_stream_close,
+ NULL, /* recv_stateless_reset */
+ cb_recv_retry,
+ cb_extend_max_local_streams_bidi,
+ NULL, /* extend_max_local_streams_uni */
+ NULL, /* rand */
+ cb_get_new_connection_id,
+ NULL, /* remove_connection_id */
+ NULL, /* update_key */
+ NULL, /* path_validation */
+ NULL, /* select_preferred_addr */
+ cb_stream_reset,
+ NULL, /* extend_max_remote_streams_bidi */
+ NULL, /* extend_max_remote_streams_uni */
+ cb_extend_max_stream_data,
+};
+
+/*
+ * Might be called twice for happy eyeballs.
+ */
+CURLcode Curl_quic_connect(struct connectdata *conn,
+ curl_socket_t sockfd,
+ int sockindex,
+ const struct sockaddr *addr,
+ socklen_t addrlen)
+{
+ int rc;
+ int rv;
+ CURLcode result;
+ ngtcp2_path path; /* TODO: this must be initialized properly */
+ struct Curl_easy *data = conn->data;
+ struct quicsocket *qs = &conn->hequic[sockindex];
+ char ipbuf[40];
+ long port;
+ uint8_t paramsbuf[64];
+ ngtcp2_transport_params params;
+ ssize_t nwrite;
+
+ qs->conn = conn;
+
+ /* extract the used address as a string */
+ if(!Curl_addr2string((struct sockaddr*)addr, addrlen, ipbuf, &port)) {
+ char buffer[STRERROR_LEN];
+ failf(data, "ssrem inet_ntop() failed with errno %d: %s",
+ SOCKERRNO, Curl_strerror(SOCKERRNO, buffer, sizeof(buffer)));
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+
+ infof(data, "Connect socket %d over QUIC to %s:%ld\n",
+ sockfd, ipbuf, port);
+
+ qs->version = NGTCP2_PROTO_VER;
+ qs->sslctx = quic_ssl_ctx(data);
+ if(!qs->sslctx)
+ return CURLE_FAILED_INIT; /* TODO: better return code */
+
+ if(quic_init_ssl(qs))
+ return CURLE_FAILED_INIT; /* TODO: better return code */
+
+ qs->dcid.datalen = NGTCP2_MAX_CIDLEN;
+ result = Curl_rand(data, qs->dcid.data, NGTCP2_MAX_CIDLEN);
+ if(result)
+ return result;
+
+ qs->scid.datalen = NGTCP2_MAX_CIDLEN;
+ result = Curl_rand(data, qs->scid.data, NGTCP2_MAX_CIDLEN);
+ if(result)
+ return result;
+
+ quic_settings(&qs->settings, data->set.buffer_size);
+
+ qs->local_addrlen = sizeof(qs->local_addr);
+ rv = getsockname(sockfd, (struct sockaddr *)&qs->local_addr,
+ &qs->local_addrlen);
+ if(rv == -1)
+ return CURLE_FAILED_INIT;
+
+ ngtcp2_addr_init(&path.local, (uint8_t *)&qs->local_addr, qs->local_addrlen,
+ NULL);
+ ngtcp2_addr_init(&path.remote, (uint8_t*)addr, addrlen, NULL);
+
+#ifdef NGTCP2_PROTO_VER
+#define QUICVER NGTCP2_PROTO_VER
+#else
+#error "unsupported ngtcp2 version"
+#endif
+ rc = ngtcp2_conn_client_new(&qs->qconn, &qs->dcid, &qs->scid, &path, QUICVER,
+ &ng_callbacks, &qs->settings, NULL, qs);
+ if(rc)
+ return CURLE_FAILED_INIT; /* TODO: create a QUIC error code */
+
+ ngtcp2_conn_get_local_transport_params(qs->qconn, &params);
+ nwrite = ngtcp2_encode_transport_params(
+ paramsbuf, sizeof(paramsbuf), NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO,
+ &params);
+ if(nwrite < 0) {
+ failf(data, "ngtcp2_encode_transport_params: %s\n",
+ ngtcp2_strerror((int)nwrite));
+ return CURLE_FAILED_INIT;
+ }
+
+ if(!SSL_set_quic_transport_params(qs->ssl, paramsbuf, nwrite))
+ return CURLE_FAILED_INIT;
+
+ rc = setup_initial_crypto_context(qs);
+ if(rc)
+ return CURLE_FAILED_INIT; /* TODO: better return code */
+
+ return CURLE_OK;
+}
+
+/*
+ * Store ngtcp2 version info in this buffer. Prefix with a space. Return the
+ * total length written.
+ */
+int Curl_quic_ver(char *p, size_t len)
+{
+ ngtcp2_info *ng2 = ngtcp2_version(0);
+ nghttp3_info *ht3 = nghttp3_version(0);
+ return msnprintf(p, len, " ngtcp2/%s nghttp3/%s",
+ ng2->version_str, ht3->version_str);
+}
+
+static int ng_getsock(struct connectdata *conn, curl_socket_t *socks)
+{
+ struct SingleRequest *k = &conn->data->req;
+ int bitmap = GETSOCK_BLANK;
+
+ socks[0] = conn->sock[FIRSTSOCKET];
+
+ /* in an HTTP/3 connection we can basically always get a frame so we should
+ always be ready for one */
+ bitmap |= GETSOCK_READSOCK(FIRSTSOCKET);
+
+ /* we're still uploading or the HTTP/3 layer wants to send data */
+ if((k->keepon & (KEEP_SEND|KEEP_SEND_PAUSE)) == KEEP_SEND)
+ bitmap |= GETSOCK_WRITESOCK(FIRSTSOCKET);
+
+ return bitmap;
+}
+
+static int ng_perform_getsock(const struct connectdata *conn,
+ curl_socket_t *socks)
+{
+ return ng_getsock((struct connectdata *)conn, socks);
+}
+
+static CURLcode ng_disconnect(struct connectdata *conn,
+ bool dead_connection)
+{
+ (void)conn;
+ (void)dead_connection;
+ return CURLE_OK;
+}
+
+static unsigned int ng_conncheck(struct connectdata *conn,
+ unsigned int checks_to_perform)
+{
+ (void)conn;
+ (void)checks_to_perform;
+ return CONNRESULT_NONE;
+}
+
+static const struct Curl_handler Curl_handler_http3 = {
+ "HTTPS", /* scheme */
+ ZERO_NULL, /* setup_connection */
+ Curl_http, /* do_it */
+ Curl_http_done, /* done */
+ ZERO_NULL, /* do_more */
+ ZERO_NULL, /* connect_it */
+ ZERO_NULL, /* connecting */
+ ZERO_NULL, /* doing */
+ ng_getsock, /* proto_getsock */
+ ng_getsock, /* doing_getsock */
+ ZERO_NULL, /* domore_getsock */
+ ng_perform_getsock, /* perform_getsock */
+ ng_disconnect, /* disconnect */
+ ZERO_NULL, /* readwrite */
+ ng_conncheck, /* connection_check */
+ PORT_HTTP, /* defport */
+ CURLPROTO_HTTPS, /* protocol */
+ PROTOPT_SSL | PROTOPT_STREAM /* flags */
+};
+
+static int cb_h3_stream_close(nghttp3_conn *conn, int64_t stream_id,
+ uint64_t app_error_code, void *user_data,
+ void *stream_user_data)
+{
+ struct Curl_easy *data = stream_user_data;
+ struct HTTP *stream = data->req.protop;
+ (void)conn;
+ (void)stream_id;
+ (void)app_error_code;
+ (void)user_data;
+ H3BUGF(infof(data, "cb_h3_stream_close CALLED\n"));
+
+ stream->closed = TRUE;
+ Curl_expire(data, 0, EXPIRE_QUIC);
+ return 0;
+}
+
+static int cb_h3_recv_data(nghttp3_conn *conn, int64_t stream_id,
+ const uint8_t *buf, size_t buflen,
+ void *user_data, void *stream_user_data)
+{
+ struct quicsocket *qs = user_data;
+ size_t ncopy;
+ struct Curl_easy *data = stream_user_data;
+ struct HTTP *stream = data->req.protop;
+ (void)conn;
+ H3BUGF(infof(data, "cb_h3_recv_data CALLED with %d bytes\n", buflen));
+
+ /* TODO: this needs to be handled properly */
+ DEBUGASSERT(buflen <= stream->len);
+
+ ncopy = CURLMIN(stream->len, buflen);
+ memcpy(stream->mem, buf, ncopy);
+ stream->len -= ncopy;
+ stream->memlen += ncopy;
+#if 0 /* extra debugging of incoming h3 data */
+ fprintf(stderr, "!! Copies %zd bytes to %p (total %zd)\n",
+ ncopy, stream->mem, stream->memlen);
+ {
+ size_t i;
+ for(i = 0; i < ncopy; i++) {
+ fprintf(stderr, "!! data[%d]: %02x '%c'\n", i, buf[i], buf[i]);
+ }
+ }
+#endif
+ stream->mem += ncopy;
+
+ ngtcp2_conn_extend_max_stream_offset(qs->qconn, stream_id, buflen);
+ ngtcp2_conn_extend_max_offset(qs->qconn, buflen);
+
+ return 0;
+}
+
+static int cb_h3_deferred_consume(nghttp3_conn *conn, int64_t stream_id,
+ size_t consumed, void *user_data,
+ void *stream_user_data)
+{
+ struct quicsocket *qs = user_data;
+ (void)conn;
+ (void)stream_user_data;
+
+ ngtcp2_conn_extend_max_stream_offset(qs->qconn, stream_id, consumed);
+ ngtcp2_conn_extend_max_offset(qs->qconn, consumed);
+
+ return 0;
+}
+
+/* Decode HTTP status code. Returns -1 if no valid status code was
+ decoded. (duplicate from http2.c) */
+static int decode_status_code(const uint8_t *value, size_t len)
+{
+ int i;
+ int res;
+
+ if(len != 3) {
+ return -1;
+ }
+
+ res = 0;
+
+ for(i = 0; i < 3; ++i) {
+ char c = value[i];
+
+ if(c < '0' || c > '9') {
+ return -1;
+ }
+
+ res *= 10;
+ res += c - '0';
+ }
+
+ return res;
+}
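
A few hypothetical sanity checks, just to make the accepted input shape explicit (exactly three ASCII digits, anything else yields -1):

#include <assert.h>

static void decode_status_code_selftest(void)
{
  assert(decode_status_code((const uint8_t *)"200", 3) == 200);
  assert(decode_status_code((const uint8_t *)"404", 3) == 404);
  assert(decode_status_code((const uint8_t *)"20x", 3) == -1);  /* non-digit */
  assert(decode_status_code((const uint8_t *)"1000", 4) == -1); /* bad length */
}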
+
+static int cb_h3_end_headers(nghttp3_conn *conn, int64_t stream_id,
+ void *user_data, void *stream_user_data)
+{
+ struct Curl_easy *data = stream_user_data;
+ struct HTTP *stream = data->req.protop;
+ (void)conn;
+ (void)stream_id;
+ (void)user_data;
+
+ if(stream->memlen >= 2) {
+ memcpy(stream->mem, "\r\n", 2);
+ stream->len -= 2;
+ stream->memlen += 2;
+ stream->mem += 2;
+ }
+ return 0;
+}
+
+static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id,
+ int32_t token, nghttp3_rcbuf *name,
+ nghttp3_rcbuf *value, uint8_t flags,
+ void *user_data, void *stream_user_data)
+{
+ nghttp3_vec h3name = nghttp3_rcbuf_get_buf(name);
+ nghttp3_vec h3val = nghttp3_rcbuf_get_buf(value);
+ struct Curl_easy *data = stream_user_data;
+ struct HTTP *stream = data->req.protop;
+ size_t ncopy;
+ (void)conn;
+ (void)stream_id;
+ (void)token;
+ (void)flags;
+ (void)user_data;
+
+ if(h3name.len == sizeof(":status") - 1 &&
+ !memcmp(":status", h3name.base, h3name.len)) {
+ int status = decode_status_code(h3val.base, h3val.len);
+ DEBUGASSERT(status != -1);
+ msnprintf(stream->mem, stream->len, "HTTP/3 %03d \r\n", status);
+ }
+ else {
+ /* store as a HTTP1-style header */
+ msnprintf(stream->mem, stream->len, "%.*s: %.*s\n",
+ h3name.len, h3name.base, h3val.len, h3val.base);
+ }
+
+ ncopy = strlen(stream->mem);
+ stream->len -= ncopy;
+ stream->memlen += ncopy;
+ stream->mem += ncopy;
+ return 0;
+}
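
For orientation, the two callbacks above rebuild a classic HTTP/1-style response head inside the stream buffer, so for an h3 response carrying ':status: 200' and 'content-type: text/html' the buffer ends up holding roughly:

/* "HTTP/3 200 \r\n"            written by cb_h3_recv_header for :status  */
/* "content-type: text/html\n"  written by cb_h3_recv_header per header   */
/* "\r\n"                       appended by cb_h3_end_headers             */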
+
+static int cb_h3_send_stop_sending(nghttp3_conn *conn, int64_t stream_id,
+ uint64_t app_error_code,
+ void *user_data,
+ void *stream_user_data)
+{
+ (void)conn;
+ (void)stream_id;
+ (void)app_error_code;
+ (void)user_data;
+ (void)stream_user_data;
+ return 0;
+}
+
+static nghttp3_conn_callbacks ngh3_callbacks = {
+ cb_h3_acked_stream_data, /* acked_stream_data */
+ cb_h3_stream_close,
+ cb_h3_recv_data,
+ cb_h3_deferred_consume,
+ NULL, /* begin_headers */
+ cb_h3_recv_header,
+ cb_h3_end_headers,
+ NULL, /* begin_trailers */
+ cb_h3_recv_header,
+ NULL, /* end_trailers */
+ NULL, /* http_begin_push_promise */
+ NULL, /* http_recv_push_promise */
+ NULL, /* http_end_push_promise */
+ NULL, /* http_cancel_push */
+ cb_h3_send_stop_sending,
+ NULL, /* push_stream */
+ NULL, /* end_stream */
+};
+
+static int init_ngh3_conn(struct quicsocket *qs)
+{
+ CURLcode result;
+ int rc;
+ int64_t ctrl_stream_id, qpack_enc_stream_id, qpack_dec_stream_id;
+
+ if(ngtcp2_conn_get_max_local_streams_uni(qs->qconn) < 3) {
+ failf(qs->conn->data, "too few available QUIC streams");
+ return CURLE_FAILED_INIT;
+ }
+
+ nghttp3_conn_settings_default(&qs->h3settings);
+
+ rc = nghttp3_conn_client_new(&qs->h3conn,
+ &ngh3_callbacks,
+ &qs->h3settings,
+ nghttp3_mem_default(),
+ qs);
+ if(rc) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
+
+ rc = ngtcp2_conn_open_uni_stream(qs->qconn, &ctrl_stream_id, NULL);
+ if(rc) {
+ result = CURLE_FAILED_INIT;
+ goto fail;
+ }
+
+ rc = nghttp3_conn_bind_control_stream(qs->h3conn, ctrl_stream_id);
+ if(rc) {
+ result = CURLE_FAILED_INIT;
+ goto fail;
+ }
+
+ rc = ngtcp2_conn_open_uni_stream(qs->qconn, &qpack_enc_stream_id, NULL);
+ if(rc) {
+ result = CURLE_FAILED_INIT;
+ goto fail;
+ }
+
+ rc = ngtcp2_conn_open_uni_stream(qs->qconn, &qpack_dec_stream_id, NULL);
+ if(rc) {
+ result = CURLE_FAILED_INIT;
+ goto fail;
+ }
+
+ rc = nghttp3_conn_bind_qpack_streams(qs->h3conn, qpack_enc_stream_id,
+ qpack_dec_stream_id);
+ if(rc) {
+ result = CURLE_FAILED_INIT;
+ goto fail;
+ }
+
+ return CURLE_OK;
+ fail:
+
+ return result;
+}
+
+static Curl_recv ngh3_stream_recv;
+static Curl_send ngh3_stream_send;
+
+/* incoming data frames on the h3 stream */
+static ssize_t ngh3_stream_recv(struct connectdata *conn,
+ int sockindex,
+ char *buf,
+ size_t buffersize,
+ CURLcode *curlcode)
+{
+ curl_socket_t sockfd = conn->sock[sockindex];
+ struct HTTP *stream = conn->data->req.protop;
+ struct quicsocket *qs = conn->quic;
+
+ if(!stream->memlen) {
+ /* remember where to store incoming data for this stream and how big the
+ buffer is */
+ stream->mem = buf;
+ stream->len = buffersize;
+ }
+ /* else, there's data in the buffer already */
+
+ if(ng_process_ingress(conn, sockfd, qs)) {
+ *curlcode = CURLE_RECV_ERROR;
+ return -1;
+ }
+ if(ng_flush_egress(conn, sockfd, qs)) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ if(stream->memlen) {
+ ssize_t memlen = stream->memlen;
+ /* data arrived */
+ *curlcode = CURLE_OK;
+ /* reset to allow more data to come */
+ stream->memlen = 0;
+ stream->mem = buf;
+ stream->len = buffersize;
+ H3BUGF(infof(conn->data, "!! ngh3_stream_recv returns %zd bytes at %p\n",
+ memlen, buf));
+ return memlen;
+ }
+
+ if(stream->closed) {
+ *curlcode = CURLE_OK;
+ return 0;
+ }
+
+ infof(conn->data, "ngh3_stream_recv returns 0 bytes and EAGAIN\n");
+ *curlcode = CURLE_AGAIN;
+ return -1;
+}
+
+/* this amount of data has now been acked on this stream */
+static int cb_h3_acked_stream_data(nghttp3_conn *conn, int64_t stream_id,
+ size_t datalen, void *user_data,
+ void *stream_user_data)
+{
+ struct Curl_easy *data = stream_user_data;
+ struct HTTP *stream = data->req.protop;
+ (void)conn;
+ (void)stream_id;
+ (void)user_data;
+
+ if(!data->set.postfields) {
+ stream->h3out->used -= datalen;
+ H3BUGF(infof(data,
+ "cb_h3_acked_stream_data, %zd bytes, %zd left unacked\n",
+ datalen, stream->h3out->used));
+ DEBUGASSERT(stream->h3out->used < H3_SEND_SIZE);
+ }
+ return 0;
+}
+
+static ssize_t cb_h3_readfunction(nghttp3_conn *conn, int64_t stream_id,
+ nghttp3_vec *vec, size_t veccnt,
+ uint32_t *pflags, void *user_data,
+ void *stream_user_data)
+{
+ struct Curl_easy *data = stream_user_data;
+ size_t nread;
+ struct HTTP *stream = data->req.protop;
+ (void)conn;
+ (void)stream_id;
+ (void)user_data;
+ (void)veccnt;
+
+ if(data->set.postfields) {
+ vec[0].base = data->set.postfields;
+ vec[0].len = data->state.infilesize;
+ *pflags = NGHTTP3_DATA_FLAG_EOF;
+ return 1;
+ }
+
+ nread = CURLMIN(stream->upload_len, H3_SEND_SIZE - stream->h3out->used);
+ if(nread > 0) {
+ /* nghttp3 wants us to hold on to the data until it tells us it is okay to
+ delete it. Append the data at the end of the h3out buffer. Since we can
+ only return consecutive data, copy the amount that fits and deliver the
+ rest in the next invocation. */
+ struct h3out *out = stream->h3out;
+ if(nread + out->windex > H3_SEND_SIZE)
+ nread = H3_SEND_SIZE - out->windex;
+
+ memcpy(&out->buf[out->windex], stream->upload_mem, nread);
+ out->windex += nread;
+ out->used += nread;
+
+ /* that's the chunk we return to nghttp3 */
+ vec[0].base = &out->buf[out->windex];
+ vec[0].len = nread;
+
+ if(out->windex == H3_SEND_SIZE)
+ out->windex = 0; /* wrap */
+ stream->upload_mem += nread;
+ stream->upload_len -= nread;
+ if(data->state.infilesize != -1) {
+ stream->upload_left -= nread;
+ if(!stream->upload_left)
+ *pflags = NGHTTP3_DATA_FLAG_EOF;
+ }
+ H3BUGF(infof(data, "cb_h3_readfunction %zd bytes%s (at %zd unacked)\n",
+ nread, *pflags == NGHTTP3_DATA_FLAG_EOF?" EOF":"",
+ out->used));
+ }
+ if(stream->upload_done && !stream->upload_len &&
+ (stream->upload_left <= 0)) {
+ H3BUGF(infof(data, "!!!!!!!!! cb_h3_readfunction sets EOF\n"));
+ *pflags = NGHTTP3_DATA_FLAG_EOF;
+ return 0;
+ }
+ else if(!nread) {
+ return NGHTTP3_ERR_WOULDBLOCK;
+ }
+ return 1;
+}
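cb_h3_readfunction() above relies on the h3out bookkeeping: nghttp3 may hold on to the bytes it is given until it acks them, so the callback copies outgoing data into a fixed wrap-around buffer, advances a write index, and space is only released when cb_h3_acked_stream_data() lowers the used counter. Below is a minimal, self-contained sketch of that pattern; the struct, function names and the 8 KB size are illustrative only and are not curl internals.

/* Minimal sketch of a "keep until acked" wrap-around buffer, assuming
   made-up names (ring, ring_append, ring_ack) and an 8 KB size. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8192

struct ring {
  unsigned char buf[RING_SIZE];
  size_t windex; /* next write position, wraps at RING_SIZE */
  size_t used;   /* bytes handed to the peer but not yet acked */
};

/* append as much of (mem, len) as fits without overwriting unacked data;
   returns the number of bytes actually stored */
static size_t ring_append(struct ring *r, const unsigned char *mem, size_t len)
{
  size_t room = RING_SIZE - r->used;      /* total free space */
  size_t chunk = RING_SIZE - r->windex;   /* contiguous space at the end */
  if(len > room)
    len = room;
  if(len > chunk)
    len = chunk;                          /* only consecutive bytes */
  memcpy(&r->buf[r->windex], mem, len);
  r->windex += len;
  if(r->windex == RING_SIZE)
    r->windex = 0;                        /* wrap */
  r->used += len;
  return len;
}

/* the peer acked 'n' bytes, so that space can be reused */
static void ring_ack(struct ring *r, size_t n)
{
  r->used -= n;
}

int main(void)
{
  struct ring r = { {0}, 0, 0 };
  unsigned char data[3000] = { 0 };
  size_t stored = ring_append(&r, data, sizeof(data)); /* 3000 stored */
  stored += ring_append(&r, data, sizeof(data));       /* 3000 more */
  ring_ack(&r, 3000);                                  /* first chunk acked */
  printf("stored %zu, still unacked %zu\n", stored, r.used); /* 6000, 3000 */
  return 0;
}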
+
+/* Index where :authority header field will appear in request header
+ field list. */
+#define AUTHORITY_DST_IDX 3
+
+static CURLcode http_request(struct connectdata *conn, const void *mem,
+ size_t len)
+{
+ struct HTTP *stream = conn->data->req.protop;
+ size_t nheader;
+ size_t i;
+ size_t authority_idx;
+ char *hdbuf = (char *)mem;
+ char *end, *line_end;
+ struct quicsocket *qs = conn->quic;
+ CURLcode result = CURLE_OK;
+ struct Curl_easy *data = conn->data;
+ nghttp3_nv *nva = NULL;
+ int64_t stream3_id;
+ int rc;
+ struct h3out *h3out = NULL;
+
+ rc = ngtcp2_conn_open_bidi_stream(qs->qconn, &stream3_id, NULL);
+ if(rc) {
+ failf(conn->data, "can get bidi streams");
+ result = CURLE_SEND_ERROR;
+ goto fail;
+ }
+
+ stream->stream3_id = stream3_id;
+ stream->h3req = TRUE; /* sent off! */
+
+ /* Calculate number of headers contained in [mem, mem + len). Assumes a
+ correctly generated HTTP header field block. */
+ nheader = 0;
+ for(i = 1; i < len; ++i) {
+ if(hdbuf[i] == '\n' && hdbuf[i - 1] == '\r') {
+ ++nheader;
+ ++i;
+ }
+ }
+ if(nheader < 2)
+ goto fail;
+
+ /* The request line and the final empty line were counted too, although
+ they are not headers. We need 3 new pseudo-headers: :method, :path and
+ :scheme. On balance that means one extra slot. */
+ nheader += 1;
+ nva = malloc(sizeof(nghttp3_nv) * nheader);
+ if(!nva) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
+
+ /* Extract :method, :path from request line
+ We do line endings with CRLF so checking for CR is enough */
+ line_end = memchr(hdbuf, '\r', len);
+ if(!line_end) {
+ result = CURLE_BAD_FUNCTION_ARGUMENT; /* internal error */
+ goto fail;
+ }
+
+ /* Method does not contain spaces */
+ end = memchr(hdbuf, ' ', line_end - hdbuf);
+ if(!end || end == hdbuf)
+ goto fail;
+ nva[0].name = (unsigned char *)":method";
+ nva[0].namelen = strlen((char *)nva[0].name);
+ nva[0].value = (unsigned char *)hdbuf;
+ nva[0].valuelen = (size_t)(end - hdbuf);
+ nva[0].flags = NGHTTP3_NV_FLAG_NONE;
+
+ hdbuf = end + 1;
+
+ /* Path may contain spaces so scan backwards */
+ end = NULL;
+ for(i = (size_t)(line_end - hdbuf); i; --i) {
+ if(hdbuf[i - 1] == ' ') {
+ end = &hdbuf[i - 1];
+ break;
+ }
+ }
+ if(!end || end == hdbuf)
+ goto fail;
+ nva[1].name = (unsigned char *)":path";
+ nva[1].namelen = strlen((char *)nva[1].name);
+ nva[1].value = (unsigned char *)hdbuf;
+ nva[1].valuelen = (size_t)(end - hdbuf);
+ nva[1].flags = NGHTTP3_NV_FLAG_NONE;
+
+ nva[2].name = (unsigned char *)":scheme";
+ nva[2].namelen = strlen((char *)nva[2].name);
+ if(conn->handler->flags & PROTOPT_SSL)
+ nva[2].value = (unsigned char *)"https";
+ else
+ nva[2].value = (unsigned char *)"http";
+ nva[2].valuelen = strlen((char *)nva[2].value);
+ nva[2].flags = NGHTTP3_NV_FLAG_NONE;
+
+
+ authority_idx = 0;
+ i = 3;
+ while(i < nheader) {
+ size_t hlen;
+
+ hdbuf = line_end + 2;
+
+ /* check for next CR, but only within the piece of data left in the given
+ buffer */
+ line_end = memchr(hdbuf, '\r', len - (hdbuf - (char *)mem));
+ if(!line_end || (line_end == hdbuf))
+ goto fail;
+
+ /* header continuation lines are not supported */
+ if(*hdbuf == ' ' || *hdbuf == '\t')
+ goto fail;
+
+ for(end = hdbuf; end < line_end && *end != ':'; ++end)
+ ;
+ if(end == hdbuf || end == line_end)
+ goto fail;
+ hlen = end - hdbuf;
+
+ if(hlen == 4 && strncasecompare("host", hdbuf, 4)) {
+ authority_idx = i;
+ nva[i].name = (unsigned char *)":authority";
+ nva[i].namelen = strlen((char *)nva[i].name);
+ }
+ else {
+ nva[i].namelen = (size_t)(end - hdbuf);
+ /* Lower case the header name for HTTP/3 */
+ Curl_strntolower((char *)hdbuf, hdbuf, nva[i].namelen);
+ nva[i].name = (unsigned char *)hdbuf;
+ }
+ nva[i].flags = NGHTTP3_NV_FLAG_NONE;
+ hdbuf = end + 1;
+ while(*hdbuf == ' ' || *hdbuf == '\t')
+ ++hdbuf;
+ end = line_end;
+
+#if 0 /* This should probably go in more or less like this */
+ switch(inspect_header((const char *)nva[i].name, nva[i].namelen, hdbuf,
+ end - hdbuf)) {
+ case HEADERINST_IGNORE:
+ /* skip header fields prohibited by HTTP/2 specification. */
+ --nheader;
+ continue;
+ case HEADERINST_TE_TRAILERS:
+ nva[i].value = (uint8_t*)"trailers";
+ nva[i].value_len = sizeof("trailers") - 1;
+ break;
+ default:
+ nva[i].value = (unsigned char *)hdbuf;
+ nva[i].value_len = (size_t)(end - hdbuf);
+ }
+#endif
+ nva[i].value = (unsigned char *)hdbuf;
+ nva[i].valuelen = (size_t)(end - hdbuf);
+ nva[i].flags = NGHTTP3_NV_FLAG_NONE;
+
+ ++i;
+ }
+
+ /* :authority must come before non-pseudo header fields */
+ if(authority_idx != 0 && authority_idx != AUTHORITY_DST_IDX) {
+ nghttp3_nv authority = nva[authority_idx];
+ for(i = authority_idx; i > AUTHORITY_DST_IDX; --i) {
+ nva[i] = nva[i - 1];
+ }
+ nva[i] = authority;
+ }
+
+ /* Warn that the stream may be rejected if the cumulative length of the
+ headers is too large. */
+#define MAX_ACC 60000 /* <64KB to account for some overhead */
+ {
+ size_t acc = 0;
+ for(i = 0; i < nheader; ++i)
+ acc += nva[i].namelen + nva[i].valuelen;
+
+ if(acc > MAX_ACC) {
+ infof(data, "http_request: Warning: The cumulative length of all "
+ "headers exceeds %zu bytes and that could cause the "
+ "stream to be rejected.\n", MAX_ACC);
+ }
+ }
+
+ switch(data->set.httpreq) {
+ case HTTPREQ_POST:
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ case HTTPREQ_PUT: {
+ nghttp3_data_reader data_reader;
+ if(data->state.infilesize != -1)
+ stream->upload_left = data->state.infilesize;
+ else
+ /* data sending without specifying the data amount up front */
+ stream->upload_left = -1; /* unknown, but not zero */
+
+ data_reader.read_data = cb_h3_readfunction;
+
+ h3out = calloc(sizeof(struct h3out), 1);
+ if(!h3out) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
+ stream->h3out = h3out;
+
+ rc = nghttp3_conn_submit_request(qs->h3conn, stream->stream3_id,
+ nva, nheader, &data_reader,
+ conn->data);
+ if(rc) {
+ result = CURLE_SEND_ERROR;
+ goto fail;
+ }
+ break;
+ }
+ default:
+ stream->upload_left = 0; /* nothing left to send */
+ rc = nghttp3_conn_submit_request(qs->h3conn, stream->stream3_id,
+ nva, nheader,
+ NULL, /* no body! */
+ conn->data);
+ if(rc) {
+ result = CURLE_SEND_ERROR;
+ goto fail;
+ }
+ break;
+ }
+
+ Curl_safefree(nva);
+
+ infof(data, "Using HTTP/3 Stream ID: %x (easy handle %p)\n",
+ stream3_id, (void *)data);
+
+ return CURLE_OK;
+
+fail:
+ free(nva);
+ return result;
+}
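The header translation above works on the request block that curl has already serialized in HTTP/1.1 style: it sizes the nghttp3_nv array by counting CRLF-terminated lines in [mem, mem + len) and then slices the buffer into name/value pairs. A small standalone helper showing just the counting step (the function name is made up for illustration):

#include <stdio.h>
#include <string.h>

static size_t count_crlf_lines(const char *mem, size_t len)
{
  size_t n = 0;
  size_t i;
  for(i = 1; i < len; ++i) {
    if(mem[i] == '\n' && mem[i - 1] == '\r') {
      ++n;
      ++i; /* step past the LF so back-to-back CRLFs are both counted */
    }
  }
  return n;
}

int main(void)
{
  const char *req = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
  size_t nheader = count_crlf_lines(req, strlen(req));
  /* 3 lines: the request line, one real header and the terminating empty
     line. http_request() allocates nheader + 1 slots, which here is 4:
     three pseudo-headers plus the single real header. */
  printf("counted %zu lines\n", nheader);
  return 0;
}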
+static ssize_t ngh3_stream_send(struct connectdata *conn,
+ int sockindex,
+ const void *mem,
+ size_t len,
+ CURLcode *curlcode)
+{
+ ssize_t sent;
+ struct quicsocket *qs = conn->quic;
+ curl_socket_t sockfd = conn->sock[sockindex];
+ struct HTTP *stream = conn->data->req.protop;
+
+ if(!stream->h3req) {
+ CURLcode result = http_request(conn, mem, len);
+ if(result) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+ sent = len;
+ }
+ else {
+ H3BUGF(infof(conn->data, "ngh3_stream_send() wants to send %zd bytes\n",
+ len));
+ if(!stream->upload_len) {
+ stream->upload_mem = mem;
+ stream->upload_len = len;
+ (void)nghttp3_conn_resume_stream(qs->h3conn, stream->stream3_id);
+ sent = len;
+ }
+ else {
+ *curlcode = CURLE_AGAIN;
+ return -1;
+ }
+ }
+
+ if(ng_flush_egress(conn, sockfd, qs)) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ *curlcode = CURLE_OK;
+ return sent;
+}
+
+static void ng_has_connected(struct connectdata *conn, int tempindex)
+{
+ conn->recv[FIRSTSOCKET] = ngh3_stream_recv;
+ conn->send[FIRSTSOCKET] = ngh3_stream_send;
+ conn->handler = &Curl_handler_http3;
+ conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
+ conn->httpversion = 30;
+ conn->bundle->multiuse = BUNDLE_MULTIPLEX;
+ conn->quic = &conn->hequic[tempindex];
+ DEBUGF(infof(conn->data, "ngtcp2 established connection!\n"));
+}
+
+/*
+ * There can be multiple connection attempts going on in parallel.
+ */
+CURLcode Curl_quic_is_connected(struct connectdata *conn,
+ int sockindex,
+ bool *done)
+{
+ CURLcode result;
+ struct quicsocket *qs = &conn->hequic[sockindex];
+ curl_socket_t sockfd = conn->tempsock[sockindex];
+
+ result = ng_process_ingress(conn, sockfd, qs);
+ if(result)
+ return result;
+
+ result = ng_flush_egress(conn, sockfd, qs);
+ if(result)
+ return result;
+
+ if(ngtcp2_conn_get_handshake_completed(qs->qconn)) {
+ *done = TRUE;
+ ng_has_connected(conn, sockindex);
+ }
+
+ return result;
+}
+
+static CURLcode ng_process_ingress(struct connectdata *conn, int sockfd,
+ struct quicsocket *qs)
+{
+ ssize_t recvd;
+ int rv;
+ uint8_t buf[65536];
+ size_t bufsize = sizeof(buf);
+ struct sockaddr_storage remote_addr;
+ socklen_t remote_addrlen;
+ ngtcp2_path path;
+ ngtcp2_tstamp ts = timestamp();
+
+ for(;;) {
+ remote_addrlen = sizeof(remote_addr);
+ while((recvd = recvfrom(sockfd, buf, bufsize, 0,
+ (struct sockaddr *)&remote_addr,
+ &remote_addrlen)) == -1 &&
+ SOCKERRNO == EINTR)
+ ;
+ if(recvd == -1) {
+ if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK)
+ break;
+
+ failf(conn->data, "ngtcp2: recvfrom() unexpectedly returned %d", recvd);
+ return CURLE_RECV_ERROR;
+ }
+
+ ngtcp2_addr_init(&path.local, (uint8_t *)&qs->local_addr,
+ qs->local_addrlen, NULL);
+ ngtcp2_addr_init(&path.remote, (uint8_t *)&remote_addr, remote_addrlen,
+ NULL);
+
+ rv = ngtcp2_conn_read_pkt(qs->qconn, &path, buf, recvd, ts);
+ if(rv != 0) {
+ /* TODO Send CONNECTION_CLOSE if possible */
+ return CURLE_RECV_ERROR;
+ }
+ }
+
+ return CURLE_OK;
+}
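ng_process_ingress() drains the non-blocking UDP socket in a loop, retrying recvfrom() on EINTR and treating EAGAIN/EWOULDBLOCK as "nothing more to read" rather than an error, before feeding each datagram to ngtcp2. A stripped-down, POSIX-only sketch of that pattern, with the curl wrappers (SOCKERRNO, failf) and the ngtcp2 call replaced by errno and a caller-supplied handler:

/* Sketch under the assumption of a POSIX socket API; not curl code. */
#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

int drain_udp(int sockfd,
              int (*feed)(const unsigned char *pkt, size_t len, void *ctx),
              void *ctx)
{
  unsigned char buf[65536];
  for(;;) {
    ssize_t recvd;
    do {
      recvd = recvfrom(sockfd, buf, sizeof(buf), 0, NULL, NULL);
    } while(recvd == -1 && errno == EINTR);   /* interrupted: retry */

    if(recvd == -1) {
      if(errno == EAGAIN || errno == EWOULDBLOCK)
        return 0;      /* socket drained, no error */
      return -1;       /* hard receive error */
    }
    if(feed(buf, (size_t)recvd, ctx))
      return -1;       /* the packet handler (e.g. the QUIC stack) failed */
  }
}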
+
+static CURLcode ng_flush_egress(struct connectdata *conn, int sockfd,
+ struct quicsocket *qs)
+{
+ int rv;
+ ssize_t sent;
+ ssize_t outlen;
+ uint8_t out[NGTCP2_MAX_PKTLEN_IPV4];
+ size_t pktlen;
+ ngtcp2_path_storage ps;
+ ngtcp2_tstamp ts = timestamp();
+ struct sockaddr_storage remote_addr;
+ ngtcp2_tstamp expiry;
+ ngtcp2_duration timeout;
+ int64_t stream_id;
+ ssize_t veccnt;
+ int fin;
+ nghttp3_vec vec[16];
+ ssize_t ndatalen;
+
+ switch(qs->local_addr.ss_family) {
+ case AF_INET:
+ pktlen = NGTCP2_MAX_PKTLEN_IPV4;
+ break;
+ case AF_INET6:
+ pktlen = NGTCP2_MAX_PKTLEN_IPV6;
+ break;
+ default:
+ assert(0);
+ }
+
+ rv = ngtcp2_conn_handle_expiry(qs->qconn, ts);
+ if(rv != 0) {
+ failf(conn->data, "ngtcp2_conn_handle_expiry returned error: %s\n",
+ ngtcp2_strerror(rv));
+ return CURLE_SEND_ERROR;
+ }
+
+ ngtcp2_path_storage_zero(&ps);
+
+ for(;;) {
+ outlen = -1;
+ if(qs->h3conn && ngtcp2_conn_get_max_data_left(qs->qconn)) {
+ veccnt = nghttp3_conn_writev_stream(qs->h3conn, &stream_id, &fin, vec,
+ sizeof(vec) / sizeof(vec[0]));
+ if(veccnt < 0) {
+ failf(conn->data, "nghttp3_conn_writev_stream returned error: %s\n",
+ nghttp3_strerror((int)veccnt));
+ return CURLE_SEND_ERROR;
+ }
+ else if(veccnt > 0) {
+ outlen =
+ ngtcp2_conn_writev_stream(qs->qconn, &ps.path,
+ out, pktlen, &ndatalen,
+ NGTCP2_WRITE_STREAM_FLAG_MORE,
+ stream_id, fin,
+ (const ngtcp2_vec *)vec, veccnt, ts);
+ if(outlen == 0) {
+ break;
+ }
+ if(outlen < 0) {
+ if(outlen == NGTCP2_ERR_STREAM_DATA_BLOCKED ||
+ outlen == NGTCP2_ERR_STREAM_SHUT_WR) {
+ rv = nghttp3_conn_block_stream(qs->h3conn, stream_id);
+ if(rv != 0) {
+ failf(conn->data,
+ "nghttp3_conn_block_stream returned error: %s\n",
+ nghttp3_strerror(rv));
+ return CURLE_SEND_ERROR;
+ }
+ continue;
+ }
+ else if(outlen == NGTCP2_ERR_WRITE_STREAM_MORE) {
+ assert(ndatalen > 0);
+ rv = nghttp3_conn_add_write_offset(qs->h3conn, stream_id,
+ ndatalen);
+ if(rv != 0) {
+ failf(conn->data,
+ "nghttp3_conn_add_write_offset returned error: %s\n",
+ nghttp3_strerror(rv));
+ return CURLE_SEND_ERROR;
+ }
+ continue;
+ }
+ else {
+ failf(conn->data, "ngtcp2_conn_writev_stream returned error: %s\n",
+ ngtcp2_strerror((int)outlen));
+ return CURLE_SEND_ERROR;
+ }
+ }
+ else if(ndatalen >= 0) {
+ rv = nghttp3_conn_add_write_offset(qs->h3conn, stream_id, ndatalen);
+ if(rv != 0) {
+ failf(conn->data,
+ "nghttp3_conn_add_write_offset returned error: %s\n",
+ nghttp3_strerror(rv));
+ return CURLE_SEND_ERROR;
+ }
+ }
+ }
+ }
+ if(outlen < 0) {
+ outlen = ngtcp2_conn_write_pkt(qs->qconn, &ps.path, out, pktlen, ts);
+ if(outlen < 0) {
+ failf(conn->data, "ngtcp2_conn_write_pkt returned error: %s\n",
+ ngtcp2_strerror((int)outlen));
+ return CURLE_SEND_ERROR;
+ }
+ if(outlen == 0)
+ break;
+ }
+
+ memcpy(&remote_addr, ps.path.remote.addr, ps.path.remote.addrlen);
+ while((sent = send(sockfd, out, outlen, 0)) == -1 &&
+ SOCKERRNO == EINTR)
+ ;
+
+ if(sent == -1) {
+ if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
+ /* TODO Cache packet */
+ break;
+ }
+ else {
+ failf(conn->data, "send() returned %zd (errno %d)\n", sent,
+ SOCKERRNO);
+ return CURLE_SEND_ERROR;
+ }
+ }
+ }
+
+ expiry = ngtcp2_conn_get_expiry(qs->qconn);
+ if(expiry != UINT64_MAX) {
+ if(expiry <= ts) {
+ timeout = NGTCP2_MILLISECONDS;
+ }
+ else {
+ timeout = expiry - ts;
+ }
+ Curl_expire(conn->data, timeout / NGTCP2_MILLISECONDS, EXPIRE_QUIC);
+ }
+
+ return CURLE_OK;
+}
+
+/*
+ * Called from transfer.c:done_sending when we stop HTTP/3 uploading.
+ */
+CURLcode Curl_quic_done_sending(struct connectdata *conn)
+{
+ if(conn->handler == &Curl_handler_http3) {
+ /* only for HTTP/3 transfers */
+ struct HTTP *stream = conn->data->req.protop;
+ struct quicsocket *qs = conn->quic;
+ stream->upload_done = TRUE;
+ (void)nghttp3_conn_resume_stream(qs->h3conn, stream->stream3_id);
+ }
+
+ return CURLE_OK;
+}
+#endif
diff --git a/libs/libcurl/src/vquic/ngtcp2.h b/libs/libcurl/src/vquic/ngtcp2.h
new file mode 100644
index 0000000000..5570fc7e78
--- /dev/null
+++ b/libs/libcurl/src/vquic/ngtcp2.h
@@ -0,0 +1,63 @@
+#ifndef HEADER_CURL_VQUIC_NGTCP2_H
+#define HEADER_CURL_VQUIC_NGTCP2_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifdef USE_NGTCP2
+
+#include <ngtcp2/ngtcp2.h>
+#include <nghttp3/nghttp3.h>
+#include <openssl/ssl.h>
+
+struct quic_handshake {
+ char *buf; /* pointer to the buffer */
+ size_t alloclen; /* size of allocation */
+ size_t len; /* size of content in buffer */
+ size_t nread; /* how many bytes have been read */
+};
+
+struct quicsocket {
+ struct connectdata *conn; /* point back to the connection */
+ ngtcp2_conn *qconn;
+ ngtcp2_cid dcid;
+ ngtcp2_cid scid;
+ uint32_t version;
+ ngtcp2_settings settings;
+ SSL_CTX *sslctx;
+ SSL *ssl;
+ struct quic_handshake client_crypto_data[3];
+ /* the last TLS alert description generated by the local endpoint */
+ uint8_t tls_alert;
+ struct sockaddr_storage local_addr;
+ socklen_t local_addrlen;
+
+ nghttp3_conn *h3conn;
+ nghttp3_conn_settings h3settings;
+};
+
+#include "urldata.h"
+
+#endif
+
+#endif /* HEADER_CURL_VQUIC_NGTCP2_H */
diff --git a/libs/libcurl/src/vquic/quiche.c b/libs/libcurl/src/vquic/quiche.c
new file mode 100644
index 0000000000..0ee360d07f
--- /dev/null
+++ b/libs/libcurl/src/vquic/quiche.c
@@ -0,0 +1,783 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifdef USE_QUICHE
+#include <quiche.h>
+#include <openssl/err.h>
+#include "urldata.h"
+#include "sendf.h"
+#include "strdup.h"
+#include "rand.h"
+#include "quic.h"
+#include "strcase.h"
+#include "multiif.h"
+#include "connect.h"
+#include "strerror.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
+#define DEBUG_HTTP3
+/* #define DEBUG_QUICHE */
+#ifdef DEBUG_HTTP3
+#define H3BUGF(x) x
+#else
+#define H3BUGF(x) do { } WHILE_FALSE
+#endif
+
+#define QUIC_MAX_STREAMS (256*1024)
+#define QUIC_MAX_DATA (1*1024*1024)
+#define QUIC_IDLE_TIMEOUT (60 * 1000) /* milliseconds */
+
+static CURLcode process_ingress(struct connectdata *conn,
+ curl_socket_t sockfd,
+ struct quicsocket *qs);
+
+static CURLcode flush_egress(struct connectdata *conn, curl_socket_t sockfd,
+ struct quicsocket *qs);
+
+static CURLcode http_request(struct connectdata *conn, const void *mem,
+ size_t len);
+static Curl_recv h3_stream_recv;
+static Curl_send h3_stream_send;
+
+
+static int quiche_getsock(struct connectdata *conn, curl_socket_t *socks)
+{
+ struct SingleRequest *k = &conn->data->req;
+ int bitmap = GETSOCK_BLANK;
+
+ socks[0] = conn->sock[FIRSTSOCKET];
+
+ /* in an HTTP/3 connection we can basically always get a frame so we should
+ always be ready for one */
+ bitmap |= GETSOCK_READSOCK(FIRSTSOCKET);
+
+ /* we're still uploading or the HTTP/3 layer wants to send data */
+ if((k->keepon & (KEEP_SEND|KEEP_SEND_PAUSE)) == KEEP_SEND)
+ bitmap |= GETSOCK_WRITESOCK(FIRSTSOCKET);
+
+ return bitmap;
+}
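quiche_getsock() reports its socket interest as a bitmap: read interest is always set because a QUIC frame can arrive at any time, and write interest is added only while an upload is in progress. A trivial illustration of composing and testing such a bitmap; the WANT_* flags are stand-ins, not curl's GETSOCK_* macros.

#include <stdio.h>

#define WANT_READ  (1 << 0)  /* poll socks[0] for readability */
#define WANT_WRITE (1 << 1)  /* poll socks[0] for writability */

int main(void)
{
  int uploading = 1;
  int bitmap = WANT_READ;        /* a frame can always arrive */
  if(uploading)
    bitmap |= WANT_WRITE;        /* also wait until we can send */

  printf("read: %s, write: %s\n",
         (bitmap & WANT_READ) ? "yes" : "no",
         (bitmap & WANT_WRITE) ? "yes" : "no");
  return 0;
}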
+
+static int quiche_perform_getsock(const struct connectdata *conn,
+ curl_socket_t *socks)
+{
+ return quiche_getsock((struct connectdata *)conn, socks);
+}
+
+static CURLcode quiche_disconnect(struct connectdata *conn,
+ bool dead_connection)
+{
+ struct quicsocket *qs = conn->quic;
+ (void)dead_connection;
+ quiche_h3_config_free(qs->h3config);
+ quiche_h3_conn_free(qs->h3c);
+ quiche_config_free(qs->cfg);
+ quiche_conn_free(qs->conn);
+ return CURLE_OK;
+}
+
+static unsigned int quiche_conncheck(struct connectdata *conn,
+ unsigned int checks_to_perform)
+{
+ (void)conn;
+ (void)checks_to_perform;
+ return CONNRESULT_NONE;
+}
+
+static CURLcode quiche_do(struct connectdata *conn, bool *done)
+{
+ struct HTTP *stream = conn->data->req.protop;
+ stream->h3req = FALSE; /* not sent */
+ return Curl_http(conn, done);
+}
+
+static const struct Curl_handler Curl_handler_http3 = {
+ "HTTPS", /* scheme */
+ ZERO_NULL, /* setup_connection */
+ quiche_do, /* do_it */
+ Curl_http_done, /* done */
+ ZERO_NULL, /* do_more */
+ ZERO_NULL, /* connect_it */
+ ZERO_NULL, /* connecting */
+ ZERO_NULL, /* doing */
+ quiche_getsock, /* proto_getsock */
+ quiche_getsock, /* doing_getsock */
+ ZERO_NULL, /* domore_getsock */
+ quiche_perform_getsock, /* perform_getsock */
+ quiche_disconnect, /* disconnect */
+ ZERO_NULL, /* readwrite */
+ quiche_conncheck, /* connection_check */
+ PORT_HTTP, /* defport */
+ CURLPROTO_HTTPS, /* protocol */
+ PROTOPT_SSL | PROTOPT_STREAM /* flags */
+};
+
+#ifdef DEBUG_QUICHE
+static void quiche_debug_log(const char *line, void *argp)
+{
+ (void)argp;
+ fprintf(stderr, "%s\n", line);
+}
+#endif
+
+CURLcode Curl_quic_connect(struct connectdata *conn, curl_socket_t sockfd,
+ int sockindex,
+ const struct sockaddr *addr, socklen_t addrlen)
+{
+ CURLcode result;
+ struct quicsocket *qs = &conn->hequic[sockindex];
+ struct Curl_easy *data = conn->data;
+
+#ifdef DEBUG_QUICHE
+ /* initialize debug log callback only once */
+ static int debug_log_init = 0;
+ if(!debug_log_init) {
+ quiche_enable_debug_logging(quiche_debug_log, NULL);
+ debug_log_init = 1;
+ }
+#endif
+
+ (void)addr;
+ (void)addrlen;
+
+ qs->cfg = quiche_config_new(QUICHE_PROTOCOL_VERSION);
+ if(!qs->cfg) {
+ failf(data, "can't create quiche config");
+ return CURLE_FAILED_INIT;
+ }
+
+ quiche_config_set_idle_timeout(qs->cfg, QUIC_IDLE_TIMEOUT);
+ quiche_config_set_initial_max_data(qs->cfg, QUIC_MAX_DATA);
+ quiche_config_set_initial_max_stream_data_bidi_local(qs->cfg, QUIC_MAX_DATA);
+ quiche_config_set_initial_max_stream_data_bidi_remote(qs->cfg,
+ QUIC_MAX_DATA);
+ quiche_config_set_initial_max_stream_data_uni(qs->cfg, QUIC_MAX_DATA);
+ quiche_config_set_initial_max_streams_bidi(qs->cfg, QUIC_MAX_STREAMS);
+ quiche_config_set_initial_max_streams_uni(qs->cfg, QUIC_MAX_STREAMS);
+ quiche_config_set_application_protos(qs->cfg,
+ (uint8_t *)
+ QUICHE_H3_APPLICATION_PROTOCOL,
+ sizeof(QUICHE_H3_APPLICATION_PROTOCOL)
+ - 1);
+
+ result = Curl_rand(data, qs->scid, sizeof(qs->scid));
+ if(result)
+ return result;
+
+ if(getenv("SSLKEYLOGFILE"))
+ quiche_config_log_keys(qs->cfg);
+
+ qs->conn = quiche_connect(conn->host.name, (const uint8_t *) qs->scid,
+ sizeof(qs->scid), qs->cfg);
+ if(!qs->conn) {
+ failf(data, "can't create quiche connection");
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ result = flush_egress(conn, sockfd, qs);
+ if(result)
+ return result;
+
+ /* store the used address as a string */
+ if(!Curl_addr2string((struct sockaddr*)addr, addrlen,
+ conn->primary_ip, &conn->primary_port)) {
+ char buffer[STRERROR_LEN];
+ failf(data, "ssrem inet_ntop() failed with errno %d: %s",
+ SOCKERRNO, Curl_strerror(SOCKERRNO, buffer, sizeof(buffer)));
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+ memcpy(conn->ip_addr_str, conn->primary_ip, MAX_IPADR_LEN);
+ Curl_persistconninfo(conn);
+
+ /* for connection reuse purposes: */
+ conn->ssl[FIRSTSOCKET].state = ssl_connection_complete;
+
+ infof(data, "Sent QUIC client Initial, ALPN: %s\n",
+ QUICHE_H3_APPLICATION_PROTOCOL + 1);
+
+ return CURLE_OK;
+}
+
+static CURLcode quiche_has_connected(struct connectdata *conn,
+ int sockindex,
+ int tempindex)
+{
+ CURLcode result;
+ struct quicsocket *qs = conn->quic = &conn->hequic[tempindex];
+
+ conn->recv[sockindex] = h3_stream_recv;
+ conn->send[sockindex] = h3_stream_send;
+ conn->handler = &Curl_handler_http3;
+ conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
+ conn->httpversion = 30;
+ conn->bundle->multiuse = BUNDLE_MULTIPLEX;
+
+ qs->h3config = quiche_h3_config_new();
+ if(!qs->h3config)
+ return CURLE_OUT_OF_MEMORY;
+
+ /* Create a new HTTP/3 connection on the QUIC connection. */
+ qs->h3c = quiche_h3_conn_new_with_transport(qs->conn, qs->h3config);
+ if(!qs->h3c) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
+ if(conn->hequic[1-tempindex].cfg) {
+ qs = &conn->hequic[1-tempindex];
+ quiche_config_free(qs->cfg);
+ quiche_conn_free(qs->conn);
+ qs->cfg = NULL;
+ qs->conn = NULL;
+ }
+ return CURLE_OK;
+ fail:
+ quiche_h3_config_free(qs->h3config);
+ quiche_h3_conn_free(qs->h3c);
+ return result;
+}
+
+/*
+ * This function gets polled to check if this QUIC connection has connected.
+ */
+CURLcode Curl_quic_is_connected(struct connectdata *conn, int sockindex,
+ bool *done)
+{
+ CURLcode result;
+ struct quicsocket *qs = &conn->hequic[sockindex];
+ curl_socket_t sockfd = conn->tempsock[sockindex];
+
+ result = process_ingress(conn, sockfd, qs);
+ if(result)
+ return result;
+
+ result = flush_egress(conn, sockfd, qs);
+ if(result)
+ return result;
+
+ if(quiche_conn_is_established(qs->conn)) {
+ *done = TRUE;
+ result = quiche_has_connected(conn, 0, sockindex);
+ DEBUGF(infof(conn->data, "quiche established connection!\n"));
+ }
+
+ return result;
+}
+
+static CURLcode process_ingress(struct connectdata *conn, int sockfd,
+ struct quicsocket *qs)
+{
+ ssize_t recvd;
+ struct Curl_easy *data = conn->data;
+ uint8_t *buf = (uint8_t *)data->state.buffer;
+ size_t bufsize = data->set.buffer_size;
+
+ /* in case the timeout expired */
+ quiche_conn_on_timeout(qs->conn);
+
+ do {
+ recvd = recv(sockfd, buf, bufsize, 0);
+ if((recvd < 0) && ((SOCKERRNO == EAGAIN) || (SOCKERRNO == EWOULDBLOCK)))
+ break;
+
+ if(recvd < 0) {
+ failf(conn->data, "quiche: recv() unexpectedly returned %d "
+ "(errno: %d, socket %d)", recvd, SOCKERRNO, sockfd);
+ return CURLE_RECV_ERROR;
+ }
+
+ recvd = quiche_conn_recv(qs->conn, buf, recvd);
+ if(recvd == QUICHE_ERR_DONE)
+ break;
+
+ if(recvd < 0) {
+ failf(conn->data, "quiche_conn_recv() == %d", recvd);
+ return CURLE_RECV_ERROR;
+ }
+ } while(1);
+
+ return CURLE_OK;
+}
+
+/*
+ * flush_egress drains the buffers and sends off data.
+ * Calls failf() on errors.
+ */
+static CURLcode flush_egress(struct connectdata *conn, int sockfd,
+ struct quicsocket *qs)
+{
+ ssize_t sent;
+ static uint8_t out[1200];
+ int64_t timeout_ns;
+
+ do {
+ sent = quiche_conn_send(qs->conn, out, sizeof(out));
+ if(sent == QUICHE_ERR_DONE)
+ break;
+
+ if(sent < 0) {
+ failf(conn->data, "quiche_conn_send returned %zd\n",
+ sent);
+ return CURLE_SEND_ERROR;
+ }
+
+ sent = send(sockfd, out, sent, 0);
+ if(sent < 0) {
+ failf(conn->data, "send() returned %zd\n", sent);
+ return CURLE_SEND_ERROR;
+ }
+ } while(1);
+
+ /* time until the next timeout event, as nanoseconds. */
+ timeout_ns = quiche_conn_timeout_as_nanos(qs->conn);
+ if(timeout_ns)
+ /* expire uses milliseconds */
+ Curl_expire(conn->data, (timeout_ns + 999999) / 1000000, EXPIRE_QUIC);
+
+ return CURLE_OK;
+}
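flush_egress() hands quiche's nanosecond timeout to Curl_expire(), which works in milliseconds; the (timeout_ns + 999999) / 1000000 expression is a ceiling division so that a short but nonzero timeout never collapses into a 0 ms wakeup. A tiny demonstration of that rounding:

#include <stdint.h>
#include <stdio.h>

static int64_t ns_to_ms_ceil(int64_t timeout_ns)
{
  return (timeout_ns + 999999) / 1000000;  /* round up to whole ms */
}

int main(void)
{
  printf("%lld\n", (long long)ns_to_ms_ceil(1));        /* 1 */
  printf("%lld\n", (long long)ns_to_ms_ceil(1000000));  /* 1 */
  printf("%lld\n", (long long)ns_to_ms_ceil(2500000));  /* 3 */
  return 0;
}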
+
+struct h3h1header {
+ char *dest;
+ size_t destlen; /* left to use */
+ size_t nlen; /* used */
+};
+
+static int cb_each_header(uint8_t *name, size_t name_len,
+ uint8_t *value, size_t value_len,
+ void *argp)
+{
+ struct h3h1header *headers = (struct h3h1header *)argp;
+ size_t olen = 0;
+
+ if((name_len == 7) && !strncmp(":status", (char *)name, 7)) {
+ msnprintf(headers->dest,
+ headers->destlen, "HTTP/3 %.*s\n",
+ (int) value_len, value);
+ }
+ else {
+ msnprintf(headers->dest,
+ headers->destlen, "%.*s: %.*s\n",
+ (int)name_len, name, (int) value_len, value);
+ }
+ olen = strlen(headers->dest);
+ headers->destlen -= olen;
+ headers->nlen += olen;
+ headers->dest += olen;
+ return 0;
+}
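cb_each_header() rewrites each HTTP/3 header pair into an HTTP/1-style text line in the caller's buffer, turning ":status" into an "HTTP/3 <code>" status line and tracking how much room is left. A self-contained sketch of that formatting step; the struct and function names are illustrative, and the destination is assumed to be large enough, as it is for the caller in this file.

#include <stdio.h>
#include <string.h>

struct lineout {
  char *dest;     /* where to write the next line */
  size_t destlen; /* room left in the destination */
  size_t nlen;    /* bytes written so far */
};

static void emit_header(struct lineout *o,
                        const char *name, size_t name_len,
                        const char *value, size_t value_len)
{
  size_t olen;
  if(name_len == 7 && !strncmp(":status", name, 7))
    snprintf(o->dest, o->destlen, "HTTP/3 %.*s\n", (int)value_len, value);
  else
    snprintf(o->dest, o->destlen, "%.*s: %.*s\n",
             (int)name_len, name, (int)value_len, value);
  olen = strlen(o->dest);
  o->dest += olen;      /* advance past the line just written */
  o->destlen -= olen;
  o->nlen += olen;
}

int main(void)
{
  char buf[256];
  struct lineout o = { buf, sizeof(buf), 0 };
  emit_header(&o, ":status", 7, "200", 3);
  emit_header(&o, "content-type", 12, "text/html", 9);
  fputs(buf, stdout); /* "HTTP/3 200" and "content-type: text/html" lines */
  return 0;
}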
+
+static ssize_t h3_stream_recv(struct connectdata *conn,
+ int sockindex,
+ char *buf,
+ size_t buffersize,
+ CURLcode *curlcode)
+{
+ ssize_t recvd = -1;
+ ssize_t rcode;
+ struct quicsocket *qs = conn->quic;
+ curl_socket_t sockfd = conn->sock[sockindex];
+ quiche_h3_event *ev;
+ int rc;
+ struct h3h1header headers;
+ struct Curl_easy *data = conn->data;
+ struct HTTP *stream = data->req.protop;
+ headers.dest = buf;
+ headers.destlen = buffersize;
+ headers.nlen = 0;
+
+ if(process_ingress(conn, sockfd, qs)) {
+ infof(data, "h3_stream_recv returns on ingress\n");
+ *curlcode = CURLE_RECV_ERROR;
+ return -1;
+ }
+
+ while(recvd < 0) {
+ int64_t s = quiche_h3_conn_poll(qs->h3c, qs->conn, &ev);
+ if(s < 0)
+ /* nothing more to do */
+ break;
+
+ if(s != stream->stream3_id) {
+ /* another transfer, ignore for now */
+ infof(data, "Got h3 for stream %u, expects %u\n",
+ s, stream->stream3_id);
+ continue;
+ }
+
+ switch(quiche_h3_event_type(ev)) {
+ case QUICHE_H3_EVENT_HEADERS:
+ rc = quiche_h3_event_for_each_header(ev, cb_each_header, &headers);
+ if(rc) {
+ /* what do we do about this? */
+ }
+ recvd = headers.nlen;
+ break;
+ case QUICHE_H3_EVENT_DATA:
+ if(!stream->firstbody) {
+ /* add a header-body separator CRLF */
+ buf[0] = '\r';
+ buf[1] = '\n';
+ buf += 2;
+ buffersize -= 2;
+ stream->firstbody = TRUE;
+ recvd = 2; /* two bytes already */
+ }
+ else
+ recvd = 0;
+ rcode = quiche_h3_recv_body(qs->h3c, qs->conn, s, (unsigned char *)buf,
+ buffersize);
+ if(rcode <= 0) {
+ recvd = -1;
+ break;
+ }
+ recvd += rcode;
+ break;
+
+ case QUICHE_H3_EVENT_FINISHED:
+ streamclose(conn, "End of stream");
+ recvd = 0; /* end of stream */
+ break;
+ default:
+ break;
+ }
+
+ quiche_h3_event_free(ev);
+ }
+ if(flush_egress(conn, sockfd, qs)) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ *curlcode = (-1 == recvd)? CURLE_AGAIN : CURLE_OK;
+ if(recvd >= 0)
+ /* Get this called again to drain the event queue */
+ Curl_expire(data, 0, EXPIRE_QUIC);
+
+ data->state.drain = (recvd >= 0) ? 1 : 0;
+ return recvd;
+}
+
+static ssize_t h3_stream_send(struct connectdata *conn,
+ int sockindex,
+ const void *mem,
+ size_t len,
+ CURLcode *curlcode)
+{
+ ssize_t sent;
+ struct quicsocket *qs = conn->quic;
+ curl_socket_t sockfd = conn->sock[sockindex];
+ struct HTTP *stream = conn->data->req.protop;
+
+ if(!stream->h3req) {
+ CURLcode result = http_request(conn, mem, len);
+ if(result) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+ sent = len;
+ }
+ else {
+ H3BUGF(infof(conn->data, "Pass on %zd body bytes to quiche\n",
+ len));
+ sent = quiche_h3_send_body(qs->h3c, qs->conn, stream->stream3_id,
+ (uint8_t *)mem, len, FALSE);
+ if(sent < 0) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+ }
+
+ if(flush_egress(conn, sockfd, qs)) {
+ *curlcode = CURLE_SEND_ERROR;
+ return -1;
+ }
+
+ *curlcode = CURLE_OK;
+ return sent;
+}
+
+/*
+ * Store the quiche version info in this buffer. Prefix it with a space.
+ * Return the total length written.
+ */
+int Curl_quic_ver(char *p, size_t len)
+{
+ return msnprintf(p, len, " quiche/%s", quiche_version());
+}
+
+/* Index where :authority header field will appear in request header
+ field list. */
+#define AUTHORITY_DST_IDX 3
+
+static CURLcode http_request(struct connectdata *conn, const void *mem,
+ size_t len)
+{
+ struct HTTP *stream = conn->data->req.protop;
+ size_t nheader;
+ size_t i;
+ size_t authority_idx;
+ char *hdbuf = (char *)mem;
+ char *end, *line_end;
+ int64_t stream3_id;
+ quiche_h3_header *nva = NULL;
+ struct quicsocket *qs = conn->quic;
+ CURLcode result = CURLE_OK;
+ struct Curl_easy *data = conn->data;
+
+ stream->h3req = TRUE; /* sent off! */
+
+ /* Calculate number of headers contained in [mem, mem + len). Assumes a
+ correctly generated HTTP header field block. */
+ nheader = 0;
+ for(i = 1; i < len; ++i) {
+ if(hdbuf[i] == '\n' && hdbuf[i - 1] == '\r') {
+ ++nheader;
+ ++i;
+ }
+ }
+ if(nheader < 2)
+ goto fail;
+
+ /* The request line and the final empty line were counted too, although
+ they are not headers. We need 3 new pseudo-headers: :method, :path and
+ :scheme. On balance that means one extra slot. */
+ nheader += 1;
+ nva = malloc(sizeof(quiche_h3_header) * nheader);
+ if(!nva) {
+ result = CURLE_OUT_OF_MEMORY;
+ goto fail;
+ }
+
+ /* Extract :method, :path from request line
+ We do line endings with CRLF so checking for CR is enough */
+ line_end = memchr(hdbuf, '\r', len);
+ if(!line_end) {
+ result = CURLE_BAD_FUNCTION_ARGUMENT; /* internal error */
+ goto fail;
+ }
+
+ /* Method does not contain spaces */
+ end = memchr(hdbuf, ' ', line_end - hdbuf);
+ if(!end || end == hdbuf)
+ goto fail;
+ nva[0].name = (unsigned char *)":method";
+ nva[0].name_len = strlen((char *)nva[0].name);
+ nva[0].value = (unsigned char *)hdbuf;
+ nva[0].value_len = (size_t)(end - hdbuf);
+
+ hdbuf = end + 1;
+
+ /* Path may contain spaces so scan backwards */
+ end = NULL;
+ for(i = (size_t)(line_end - hdbuf); i; --i) {
+ if(hdbuf[i - 1] == ' ') {
+ end = &hdbuf[i - 1];
+ break;
+ }
+ }
+ if(!end || end == hdbuf)
+ goto fail;
+ nva[1].name = (unsigned char *)":path";
+ nva[1].name_len = strlen((char *)nva[1].name);
+ nva[1].value = (unsigned char *)hdbuf;
+ nva[1].value_len = (size_t)(end - hdbuf);
+
+ nva[2].name = (unsigned char *)":scheme";
+ nva[2].name_len = strlen((char *)nva[2].name);
+ if(conn->handler->flags & PROTOPT_SSL)
+ nva[2].value = (unsigned char *)"https";
+ else
+ nva[2].value = (unsigned char *)"http";
+ nva[2].value_len = strlen((char *)nva[2].value);
+
+
+ authority_idx = 0;
+ i = 3;
+ while(i < nheader) {
+ size_t hlen;
+
+ hdbuf = line_end + 2;
+
+ /* check for next CR, but only within the piece of data left in the given
+ buffer */
+ line_end = memchr(hdbuf, '\r', len - (hdbuf - (char *)mem));
+ if(!line_end || (line_end == hdbuf))
+ goto fail;
+
+ /* header continuation lines are not supported */
+ if(*hdbuf == ' ' || *hdbuf == '\t')
+ goto fail;
+
+ for(end = hdbuf; end < line_end && *end != ':'; ++end)
+ ;
+ if(end == hdbuf || end == line_end)
+ goto fail;
+ hlen = end - hdbuf;
+
+ if(hlen == 4 && strncasecompare("host", hdbuf, 4)) {
+ authority_idx = i;
+ nva[i].name = (unsigned char *)":authority";
+ nva[i].name_len = strlen((char *)nva[i].name);
+ }
+ else {
+ nva[i].name_len = (size_t)(end - hdbuf);
+ /* Lower case the header name for HTTP/3 */
+ Curl_strntolower((char *)hdbuf, hdbuf, nva[i].name_len);
+ nva[i].name = (unsigned char *)hdbuf;
+ }
+ hdbuf = end + 1;
+ while(*hdbuf == ' ' || *hdbuf == '\t')
+ ++hdbuf;
+ end = line_end;
+
+#if 0 /* This should probably go in more or less like this */
+ switch(inspect_header((const char *)nva[i].name, nva[i].namelen, hdbuf,
+ end - hdbuf)) {
+ case HEADERINST_IGNORE:
+ /* skip header fields prohibited by HTTP/2 specification. */
+ --nheader;
+ continue;
+ case HEADERINST_TE_TRAILERS:
+ nva[i].value = (uint8_t*)"trailers";
+ nva[i].value_len = sizeof("trailers") - 1;
+ break;
+ default:
+ nva[i].value = (unsigned char *)hdbuf;
+ nva[i].value_len = (size_t)(end - hdbuf);
+ }
+#endif
+ nva[i].value = (unsigned char *)hdbuf;
+ nva[i].value_len = (size_t)(end - hdbuf);
+
+ ++i;
+ }
+
+ /* :authority must come before non-pseudo header fields */
+ if(authority_idx != 0 && authority_idx != AUTHORITY_DST_IDX) {
+ quiche_h3_header authority = nva[authority_idx];
+ for(i = authority_idx; i > AUTHORITY_DST_IDX; --i) {
+ nva[i] = nva[i - 1];
+ }
+ nva[i] = authority;
+ }
+
+ /* Warn that the stream may be rejected if the cumulative length of the
+ headers is too large. */
+#define MAX_ACC 60000 /* <64KB to account for some overhead */
+ {
+ size_t acc = 0;
+
+ for(i = 0; i < nheader; ++i) {
+ acc += nva[i].name_len + nva[i].value_len;
+
+ H3BUGF(infof(data, "h3 [%.*s: %.*s]\n",
+ nva[i].name_len, nva[i].name,
+ nva[i].value_len, nva[i].value));
+ }
+
+ if(acc > MAX_ACC) {
+ infof(data, "http_request: Warning: The cumulative length of all "
+ "headers exceeds %zu bytes and that could cause the "
+ "stream to be rejected.\n", MAX_ACC);
+ }
+ }
+
+ switch(data->set.httpreq) {
+ case HTTPREQ_POST:
+ case HTTPREQ_POST_FORM:
+ case HTTPREQ_POST_MIME:
+ case HTTPREQ_PUT:
+ if(data->state.infilesize != -1)
+ stream->upload_left = data->state.infilesize;
+ else
+ /* data sending without specifying the data amount up front */
+ stream->upload_left = -1; /* unknown, but not zero */
+
+ stream3_id = quiche_h3_send_request(qs->h3c, qs->conn, nva, nheader,
+ stream->upload_left ? FALSE: TRUE);
+ if((stream3_id >= 0) && data->set.postfields) {
+ ssize_t sent = quiche_h3_send_body(qs->h3c, qs->conn, stream3_id,
+ (uint8_t *)data->set.postfields,
+ stream->upload_left, TRUE);
+ if(sent <= 0) {
+ failf(data, "quiche_h3_send_body failed!");
+ result = CURLE_SEND_ERROR;
+ }
+ stream->upload_left = 0; /* nothing left to send */
+ }
+ break;
+ default:
+ stream3_id = quiche_h3_send_request(qs->h3c, qs->conn, nva, nheader,
+ TRUE);
+ break;
+ }
+
+ Curl_safefree(nva);
+
+ if(stream3_id < 0) {
+ H3BUGF(infof(data, "quiche_h3_send_request returned %d\n",
+ stream3_id));
+ result = CURLE_SEND_ERROR;
+ goto fail;
+ }
+
+ infof(data, "Using HTTP/3 Stream ID: %x (easy handle %p)\n",
+ stream3_id, (void *)data);
+ stream->stream3_id = stream3_id;
+
+ return CURLE_OK;
+
+fail:
+ free(nva);
+ return result;
+}
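Both HTTP/3 backends rewrite a "host:" header as ":authority" and then rotate it down to slot 3 (AUTHORITY_DST_IDX), since pseudo-header fields must precede regular ones. A standalone demonstration of that rotation using plain strings instead of quiche/nghttp3 header structs:

#include <stdio.h>

#define AUTHORITY_DST_IDX 3

int main(void)
{
  const char *hdr[] = {
    ":method", ":path", ":scheme",
    "accept", "user-agent", ":authority", "accept-encoding"
  };
  size_t authority_idx = 5; /* where the host header happened to appear */
  size_t i;

  if(authority_idx != 0 && authority_idx != AUTHORITY_DST_IDX) {
    const char *authority = hdr[authority_idx];
    for(i = authority_idx; i > AUTHORITY_DST_IDX; --i)
      hdr[i] = hdr[i - 1];  /* shift the in-between headers up by one */
    hdr[i] = authority;
  }

  for(i = 0; i < sizeof(hdr)/sizeof(hdr[0]); ++i)
    printf("%s\n", hdr[i]); /* ":authority" now directly follows ":scheme" */
  return 0;
}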
+
+/*
+ * Called from transfer.c:done_sending when we stop HTTP/3 uploading.
+ */
+CURLcode Curl_quic_done_sending(struct connectdata *conn)
+{
+ if(conn->handler == &Curl_handler_http3) {
+ /* only for HTTP/3 transfers */
+ ssize_t sent;
+ struct HTTP *stream = conn->data->req.protop;
+ struct quicsocket *qs = conn->quic;
+ H3BUGF(infof(conn->data, "Curl_quic_done_sending\n"));
+ stream->upload_done = TRUE;
+ sent = quiche_h3_send_body(qs->h3c, qs->conn, stream->stream3_id,
+ NULL, 0, TRUE);
+ if(sent < 0)
+ return CURLE_SEND_ERROR;
+ }
+
+ return CURLE_OK;
+}
+
+#endif
diff --git a/libs/libcurl/src/vquic/quiche.h b/libs/libcurl/src/vquic/quiche.h
new file mode 100644
index 0000000000..c8d1837b56
--- /dev/null
+++ b/libs/libcurl/src/vquic/quiche.h
@@ -0,0 +1,49 @@
+#ifndef HEADER_CURL_VQUIC_QUICHE_H
+#define HEADER_CURL_VQUIC_QUICHE_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#ifdef USE_QUICHE
+
+#include <quiche.h>
+
+struct quic_handshake {
+ char *buf; /* pointer to the buffer */
+ size_t alloclen; /* size of allocation */
+ size_t len; /* size of content in buffer */
+ size_t nread; /* how many bytes have been read */
+};
+
+struct quicsocket {
+ quiche_config *cfg;
+ quiche_conn *conn;
+ quiche_h3_conn *h3c;
+ quiche_h3_config *h3config;
+ uint8_t scid[QUICHE_MAX_CONN_ID_LEN];
+ uint32_t version;
+};
+
+#endif
+
+#endif /* HEADER_CURL_VQUIC_QUICHE_H */
diff --git a/libs/libcurl/src/vssh/libssh.c b/libs/libcurl/src/vssh/libssh.c
index 76956a3c1f..cad8b37864 100644
--- a/libs/libcurl/src/vssh/libssh.c
+++ b/libs/libcurl/src/vssh/libssh.c
@@ -493,7 +493,7 @@ restart:
return SSH_ERROR;
nprompts = ssh_userauth_kbdint_getnprompts(sshc->ssh_session);
- if(nprompts == SSH_ERROR || nprompts != 1)
+ if(nprompts != 1)
return SSH_ERROR;
rc = ssh_userauth_kbdint_setanswer(sshc->ssh_session, 0, conn->passwd);
@@ -1356,7 +1356,7 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
break;
}
}
- else if(sshc->readdir_attrs == NULL && sftp_dir_eof(sshc->sftp_dir)) {
+ else if(sftp_dir_eof(sshc->sftp_dir)) {
state(conn, SSH_SFTP_READDIR_DONE);
break;
}
@@ -1999,7 +1999,7 @@ static CURLcode myssh_block_statemach(struct connectdata *conn,
}
}
- if(!result && block) {
+ if(block) {
curl_socket_t fd_read = conn->sock[FIRSTSOCKET];
/* wait for the socket to become ready */
(void) Curl_socket_check(fd_read, CURL_SOCKET_BAD,
diff --git a/libs/libcurl/src/vssh/libssh2.c b/libs/libcurl/src/vssh/libssh2.c
index 2b25a514f4..c71cfbc9fd 100644
--- a/libs/libcurl/src/vssh/libssh2.c
+++ b/libs/libcurl/src/vssh/libssh2.c
@@ -2811,7 +2811,7 @@ static CURLcode ssh_block_statemach(struct connectdata *conn,
}
#ifdef HAVE_LIBSSH2_SESSION_BLOCK_DIRECTION
- if(!result && block) {
+ if(block) {
int dir = libssh2_session_block_directions(sshc->ssh_session);
curl_socket_t sock = conn->sock[FIRSTSOCKET];
curl_socket_t fd_read = CURL_SOCKET_BAD;
@@ -2822,7 +2822,7 @@ static CURLcode ssh_block_statemach(struct connectdata *conn,
fd_write = sock;
/* wait for the socket to become ready */
(void)Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write,
- left>1000?1000:left); /* ignore result */
+ left>1000?1000:(time_t)left);
}
#endif
diff --git a/libs/libcurl/src/vtls/gskit.c b/libs/libcurl/src/vtls/gskit.c
index b93ff5d4f4..32153dd071 100644
--- a/libs/libcurl/src/vtls/gskit.c
+++ b/libs/libcurl/src/vtls/gskit.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -26,6 +26,8 @@
#include <gskssl.h>
#include <qsoasync.h>
+#undef HAVE_SOCKETPAIR /* because the native one isn't good enough */
+#include "socketpair.h"
/* Some symbols are undefined/unsupported on OS400 versions < V7R1. */
#ifndef GSK_SSL_EXTN_SERVERNAME_REQUEST
@@ -511,100 +513,6 @@ static void close_async_handshake(struct ssl_connect_data *connssl)
BACKEND->iocport = -1;
}
-/* SSL over SSL
- * Problems:
- * 1) GSKit can only perform SSL on an AF_INET or AF_INET6 stream socket. To
- * pipe an SSL stream into another, it is therefore needed to have a pair
- * of such communicating sockets and handle the pipelining explicitly.
- * 2) OS/400 socketpair() is only implemented for domain AF_UNIX, thus cannot
- * be used to produce the pipeline.
- * The solution is to simulate socketpair() for AF_INET with low-level API
- * listen(), bind() and connect().
- */
-
-static int
-inetsocketpair(int sv[2])
-{
- int lfd; /* Listening socket. */
- int sfd; /* Server socket. */
- int cfd; /* Client socket. */
- int len;
- struct sockaddr_in addr1;
- struct sockaddr_in addr2;
-
- /* Create listening socket on a local dynamic port. */
- lfd = socket(AF_INET, SOCK_STREAM, 0);
- if(lfd < 0)
- return -1;
- memset((char *) &addr1, 0, sizeof(addr1));
- addr1.sin_family = AF_INET;
- addr1.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr1.sin_port = 0;
- if(bind(lfd, (struct sockaddr *) &addr1, sizeof(addr1)) ||
- listen(lfd, 2) < 0) {
- close(lfd);
- return -1;
- }
-
- /* Get the allocated port. */
- len = sizeof(addr1);
- if(getsockname(lfd, (struct sockaddr *) &addr1, &len) < 0) {
- close(lfd);
- return -1;
- }
-
- /* Create the client socket. */
- cfd = socket(AF_INET, SOCK_STREAM, 0);
- if(cfd < 0) {
- close(lfd);
- return -1;
- }
-
- /* Request unblocking connection to the listening socket. */
- curlx_nonblock(cfd, TRUE);
- if(connect(cfd, (struct sockaddr *) &addr1, sizeof(addr1)) < 0 &&
- errno != EINPROGRESS) {
- close(lfd);
- close(cfd);
- return -1;
- }
-
- /* Get the client dynamic port for intrusion check below. */
- len = sizeof(addr2);
- if(getsockname(cfd, (struct sockaddr *) &addr2, &len) < 0) {
- close(lfd);
- close(cfd);
- return -1;
- }
-
- /* Accept the incoming connection and get the server socket. */
- curlx_nonblock(lfd, TRUE);
- for(;;) {
- len = sizeof(addr1);
- sfd = accept(lfd, (struct sockaddr *) &addr1, &len);
- if(sfd < 0) {
- close(lfd);
- close(cfd);
- return -1;
- }
-
- /* Check for possible intrusion from an external process. */
- if(addr1.sin_addr.s_addr == addr2.sin_addr.s_addr &&
- addr1.sin_port == addr2.sin_port)
- break;
-
- /* Intrusion: reject incoming connection. */
- close(sfd);
- }
-
- /* Done, return sockets and succeed. */
- close(lfd);
- curlx_nonblock(cfd, FALSE);
- sv[0] = cfd;
- sv[1] = sfd;
- return 0;
-}
-
static int pipe_ssloverssl(struct connectdata *conn, int sockindex,
int directions)
{
@@ -855,7 +763,7 @@ static CURLcode gskit_connect_step1(struct connectdata *conn, int sockindex)
/* Establish a pipelining socket pair for SSL over SSL. */
if(conn->proxy_ssl[sockindex].use) {
- if(inetsocketpair(sockpair))
+ if(Curl_socketpair(0, 0, 0, sockpair))
return CURLE_SSL_CONNECT_ERROR;
BACKEND->localfd = sockpair[0];
BACKEND->remotefd = sockpair[1];
@@ -1157,7 +1065,7 @@ static CURLcode gskit_connect_common(struct connectdata *conn, int sockindex,
{
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
- long timeout_ms;
+ timediff_t timeout_ms;
CURLcode result = CURLE_OK;
*done = connssl->state == ssl_connection_complete;
diff --git a/libs/libcurl/src/vtls/gtls.c b/libs/libcurl/src/vtls/gtls.c
index 8693cdce3f..3737d7c685 100644
--- a/libs/libcurl/src/vtls/gtls.c
+++ b/libs/libcurl/src/vtls/gtls.c
@@ -288,7 +288,7 @@ static CURLcode handshake(struct connectdata *conn,
curl_socket_t sockfd = conn->sock[sockindex];
for(;;) {
- time_t timeout_ms;
+ timediff_t timeout_ms;
int rc;
/* check allowed time left */
@@ -311,7 +311,7 @@ static CURLcode handshake(struct connectdata *conn,
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking?0:
- timeout_ms?timeout_ms:1000);
+ timeout_ms?(time_t)timeout_ms:1000);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
@@ -1608,7 +1608,7 @@ static ssize_t gtls_send(struct connectdata *conn,
static void close_one(struct ssl_connect_data *connssl)
{
if(BACKEND->session) {
- gnutls_bye(BACKEND->session, GNUTLS_SHUT_RDWR);
+ gnutls_bye(BACKEND->session, GNUTLS_SHUT_WR);
gnutls_deinit(BACKEND->session);
BACKEND->session = NULL;
}
diff --git a/libs/libcurl/src/vtls/mbedtls.c b/libs/libcurl/src/vtls/mbedtls.c
index 63d1f4c81b..e34ec9d13f 100644
--- a/libs/libcurl/src/vtls/mbedtls.c
+++ b/libs/libcurl/src/vtls/mbedtls.c
@@ -588,6 +588,9 @@ mbed_connect_step2(struct connectdata *conn,
else if(ret & MBEDTLS_X509_BADCERT_NOT_TRUSTED)
failf(data, "Cert verify failed: BADCERT_NOT_TRUSTED");
+ else if(ret & MBEDTLS_X509_BADCERT_FUTURE)
+ failf(data, "Cert verify failed: BADCERT_FUTURE");
+
return CURLE_PEER_FAILED_VERIFICATION;
}
@@ -884,7 +887,7 @@ mbed_connect_common(struct connectdata *conn,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- long timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -930,7 +933,7 @@ mbed_connect_common(struct connectdata *conn,
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
- nonblocking ? 0 : timeout_ms);
+ nonblocking ? 0 : (time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
diff --git a/libs/libcurl/src/vtls/mesalink.c b/libs/libcurl/src/vtls/mesalink.c
index 9507888bdf..cab1e390b4 100644
--- a/libs/libcurl/src/vtls/mesalink.c
+++ b/libs/libcurl/src/vtls/mesalink.c
@@ -6,7 +6,7 @@
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2017 - 2018, Yiming Jing, <jingyiming@baidu.com>
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -494,7 +494,7 @@ mesalink_connect_common(struct connectdata *conn, int sockindex,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- time_t timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -543,7 +543,8 @@ mesalink_connect_common(struct connectdata *conn, int sockindex,
: CURL_SOCKET_BAD;
what = Curl_socket_check(
- readfd, CURL_SOCKET_BAD, writefd, nonblocking ? 0 : timeout_ms);
+ readfd, CURL_SOCKET_BAD, writefd,
+ nonblocking ? 0 : (time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
diff --git a/libs/libcurl/src/vtls/nss.c b/libs/libcurl/src/vtls/nss.c
index 435f3e93a1..a375f00da2 100644
--- a/libs/libcurl/src/vtls/nss.c
+++ b/libs/libcurl/src/vtls/nss.c
@@ -2127,7 +2127,7 @@ static CURLcode nss_do_connect(struct connectdata *conn, int sockindex)
/* check timeout situation */
- const time_t time_left = Curl_timeleft(data, NULL, TRUE);
+ const timediff_t time_left = Curl_timeleft(data, NULL, TRUE);
if(time_left < 0) {
failf(data, "timed out before SSL handshake");
result = CURLE_OPERATION_TIMEDOUT;
diff --git a/libs/libcurl/src/vtls/openssl.c b/libs/libcurl/src/vtls/openssl.c
index 385f281793..760758d234 100644
--- a/libs/libcurl/src/vtls/openssl.c
+++ b/libs/libcurl/src/vtls/openssl.c
@@ -44,6 +44,7 @@
#include "strcase.h"
#include "hostcheck.h"
#include "multiif.h"
+#include "strerror.h"
#include "curl_printf.h"
#include <openssl/ssl.h>
#include <openssl/rand.h>
@@ -2165,8 +2166,13 @@ set_ssl_version_min_max(SSL_CTX *ctx, struct connectdata *conn)
long curl_ssl_version_max;
/* convert cURL min SSL version option to OpenSSL constant */
+#if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
+ uint16_t ossl_ssl_version_min = 0;
+ uint16_t ossl_ssl_version_max = 0;
+#else
long ossl_ssl_version_min = 0;
long ossl_ssl_version_max = 0;
+#endif
switch(curl_ssl_version_min) {
case CURL_SSLVERSION_TLSv1: /* TLS 1.x */
case CURL_SSLVERSION_TLSv1_0:
@@ -2186,10 +2192,10 @@ set_ssl_version_min_max(SSL_CTX *ctx, struct connectdata *conn)
}
/* CURL_SSLVERSION_DEFAULT means that no option was selected.
- We don't want to pass 0 to SSL_CTX_set_min_proto_version as
- it would enable all versions down to the lowest supported by
- the library.
- So we skip this, and stay with the OS default
+ We don't want to pass 0 to SSL_CTX_set_min_proto_version as
+ it would enable all versions down to the lowest supported by
+ the library.
+ So we skip this, and stay with the OS default
*/
if(curl_ssl_version_min != CURL_SSLVERSION_DEFAULT) {
if(!SSL_CTX_set_min_proto_version(ctx, ossl_ssl_version_min)) {
@@ -3649,7 +3655,7 @@ static CURLcode ossl_connect_common(struct connectdata *conn,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- time_t timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -3696,7 +3702,7 @@ static CURLcode ossl_connect_common(struct connectdata *conn,
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
- nonblocking?0:timeout_ms);
+ nonblocking?0:(time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
@@ -3820,8 +3826,8 @@ static ssize_t ossl_send(struct connectdata *conn,
*curlcode = CURLE_AGAIN;
return -1;
case SSL_ERROR_SYSCALL:
- failf(conn->data, "SSL_write() returned SYSCALL, errno = %d",
- SOCKERRNO);
+ Curl_strerror(SOCKERRNO, error_buffer, sizeof(error_buffer));
+ failf(conn->data, OSSL_PACKAGE " SSL_write: %s", error_buffer);
*curlcode = CURLE_SEND_ERROR;
return -1;
case SSL_ERROR_SSL:
@@ -3878,13 +3884,21 @@ static ssize_t ossl_recv(struct connectdata *conn, /* connection data */
break;
case SSL_ERROR_ZERO_RETURN: /* no more data */
/* close_notify alert */
- connclose(conn, "TLS close_notify");
+ if(num == FIRSTSOCKET)
+ /* mark the connection for close if it is indeed the control
+ connection */
+ connclose(conn, "TLS close_notify");
break;
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
/* there's data pending, re-invoke SSL_read() */
*curlcode = CURLE_AGAIN;
return -1;
+ case SSL_ERROR_SYSCALL:
+ Curl_strerror(SOCKERRNO, error_buffer, sizeof(error_buffer));
+ failf(conn->data, OSSL_PACKAGE " SSL_read: %s", error_buffer);
+ *curlcode = CURLE_RECV_ERROR;
+ return -1;
default:
/* openssl/ssl.h for SSL_ERROR_SYSCALL says "look at error stack/return
value/errno" */
diff --git a/libs/libcurl/src/vtls/polarssl.c b/libs/libcurl/src/vtls/polarssl.c
index 7ea26b4425..9e7dd90437 100644
--- a/libs/libcurl/src/vtls/polarssl.c
+++ b/libs/libcurl/src/vtls/polarssl.c
@@ -734,7 +734,7 @@ polarssl_connect_common(struct connectdata *conn,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- long timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -781,7 +781,7 @@ polarssl_connect_common(struct connectdata *conn,
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
- nonblocking?0:timeout_ms);
+ nonblocking?0:(time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
diff --git a/libs/libcurl/src/vtls/schannel.c b/libs/libcurl/src/vtls/schannel.c
index 0f6f734fdc..bbd2fe921c 100644
--- a/libs/libcurl/src/vtls/schannel.c
+++ b/libs/libcurl/src/vtls/schannel.c
@@ -1181,6 +1181,7 @@ struct Adder_args
struct connectdata *conn;
CURLcode result;
int idx;
+ int certs_count;
};
static bool
@@ -1191,7 +1192,9 @@ add_cert_to_certinfo(const CERT_CONTEXT *ccert_context, void *raw_arg)
if(valid_cert_encoding(ccert_context)) {
const char *beg = (const char *) ccert_context->pbCertEncoded;
const char *end = beg + ccert_context->cbCertEncoded;
- args->result = Curl_extract_certinfo(args->conn, (args->idx)++, beg, end);
+ int insert_index = (args->certs_count - 1) - args->idx;
+ args->result = Curl_extract_certinfo(args->conn, insert_index, beg, end);
+ args->idx++;
}
return args->result == CURLE_OK;
}
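The schannel change above stores the received certificates in reverse traversal order so that the server's own (leaf) certificate lands at index 0 of the certinfo array; the insertion slot is derived from the total count. A tiny illustration of that index arithmetic:

#include <stdio.h>

int main(void)
{
  int certs_count = 3;
  int idx;
  for(idx = 0; idx < certs_count; idx++)
    printf("visited #%d -> stored at index %d\n",
           idx, (certs_count - 1) - idx);
  /* prints 0->2, 1->1, 2->0: the traversal order is inverted */
  return 0;
}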
@@ -1326,6 +1329,7 @@ schannel_connect_step3(struct connectdata *conn, int sockindex)
struct Adder_args args;
args.conn = conn;
args.idx = 0;
+ args.certs_count = certs_count;
traverse_cert_store(ccert_context, add_cert_to_certinfo, &args);
result = args.result;
}
@@ -1347,7 +1351,7 @@ schannel_connect_common(struct connectdata *conn, int sockindex,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- time_t timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -1394,7 +1398,7 @@ schannel_connect_common(struct connectdata *conn, int sockindex,
connssl->connecting_state ? sockfd : CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
- nonblocking ? 0 : timeout_ms);
+ nonblocking ? 0 : (time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL/TLS socket, errno: %d", SOCKERRNO);
@@ -1544,7 +1548,7 @@ schannel_send(struct connectdata *conn, int sockindex,
/* send entire message or fail */
while(len > (size_t)written) {
ssize_t this_write;
- time_t timeleft;
+ timediff_t timeleft;
int what;
this_write = 0;
diff --git a/libs/libcurl/src/vtls/schannel_verify.c b/libs/libcurl/src/vtls/schannel_verify.c
index 5a09e969e8..1bdf50a55c 100644
--- a/libs/libcurl/src/vtls/schannel_verify.c
+++ b/libs/libcurl/src/vtls/schannel_verify.c
@@ -111,7 +111,7 @@ static CURLcode add_certs_to_store(HCERTSTORE trust_store,
*/
ca_file_handle = CreateFile(ca_file_tstr,
GENERIC_READ,
- 0,
+ FILE_SHARE_READ,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
diff --git a/libs/libcurl/src/vtls/sectransp.c b/libs/libcurl/src/vtls/sectransp.c
index 3fb125ab56..4eece89d55 100644
--- a/libs/libcurl/src/vtls/sectransp.c
+++ b/libs/libcurl/src/vtls/sectransp.c
@@ -79,7 +79,7 @@
/* These macros mean "the following code is present to allow runtime backward
compatibility with at least this cat or earlier":
(You set this at build-time using the compiler command line option
- "-mmacos-version-min.") */
+ "-mmacosx-version-min.") */
#define CURL_SUPPORT_MAC_10_5 MAC_OS_X_VERSION_MIN_REQUIRED <= 1050
#define CURL_SUPPORT_MAC_10_6 MAC_OS_X_VERSION_MIN_REQUIRED <= 1060
#define CURL_SUPPORT_MAC_10_7 MAC_OS_X_VERSION_MIN_REQUIRED <= 1070
@@ -2805,7 +2805,7 @@ sectransp_connect_common(struct connectdata *conn,
struct Curl_easy *data = conn->data;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
- long timeout_ms;
+ timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
@@ -2852,7 +2852,7 @@ sectransp_connect_common(struct connectdata *conn,
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
- nonblocking?0:timeout_ms);
+ nonblocking?0:(time_t)timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
diff --git a/libs/libcurl/src/vtls/vtls.c b/libs/libcurl/src/vtls/vtls.c
index 422819899e..e6d7562254 100644
--- a/libs/libcurl/src/vtls/vtls.c
+++ b/libs/libcurl/src/vtls/vtls.c
@@ -97,7 +97,8 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
Curl_safe_strcasecompare(data->random_file, needle->random_file) &&
Curl_safe_strcasecompare(data->egdsocket, needle->egdsocket) &&
Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
- Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13))
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
return TRUE;
return FALSE;
@@ -121,6 +122,7 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
CLONE_STRING(egdsocket);
CLONE_STRING(cipher_list);
CLONE_STRING(cipher_list13);
+ CLONE_STRING(pinned_key);
return TRUE;
}
@@ -134,6 +136,7 @@ void Curl_free_primary_ssl_config(struct ssl_primary_config* sslc)
Curl_safefree(sslc->egdsocket);
Curl_safefree(sslc->cipher_list);
Curl_safefree(sslc->cipher_list13);
+ Curl_safefree(sslc->pinned_key);
}
#ifdef USE_SSL