author    dartraiden <wowemuh@gmail.com>  2024-07-25 00:50:30 +0300
committer dartraiden <wowemuh@gmail.com>  2024-07-25 02:38:23 +0300
commit    67a42fc97c64c83e02f6f0d68e5a4a22c71138d3 (patch)
tree      21eb2d53a9cd7e645a58662dee11588f56057eee
parent    0a365886f2d06750a707037d894e1492988eb53c (diff)
libcurl: update to 8.9.0
-rw-r--r--libs/libcurl/docs/CHANGES10493
-rw-r--r--libs/libcurl/docs/THANKS38
-rw-r--r--libs/libcurl/include/README.md2
-rw-r--r--libs/libcurl/include/curl/curl.h125
-rw-r--r--libs/libcurl/include/curl/curlver.h10
-rw-r--r--libs/libcurl/include/curl/easy.h2
-rw-r--r--libs/libcurl/include/curl/mprintf.h3
-rw-r--r--libs/libcurl/include/curl/multi.h16
-rw-r--r--libs/libcurl/include/curl/system.h4
-rw-r--r--libs/libcurl/include/curl/typecheck-gcc.h10
-rw-r--r--libs/libcurl/include/curl/urlapi.h5
-rw-r--r--libs/libcurl/src/CMakeLists.txt65
-rw-r--r--libs/libcurl/src/Makefile.am2
-rw-r--r--libs/libcurl/src/Makefile.in7
-rw-r--r--libs/libcurl/src/altsvc.c24
-rw-r--r--libs/libcurl/src/altsvc.h2
-rw-r--r--libs/libcurl/src/amigaos.c2
-rw-r--r--libs/libcurl/src/arpa_telnet.h2
-rw-r--r--libs/libcurl/src/asyn-ares.c92
-rw-r--r--libs/libcurl/src/asyn-thread.c63
-rw-r--r--libs/libcurl/src/asyn.h16
-rw-r--r--libs/libcurl/src/bufref.c2
-rw-r--r--libs/libcurl/src/c-hyper.c21
-rw-r--r--libs/libcurl/src/cf-h1-proxy.c30
-rw-r--r--libs/libcurl/src/cf-h2-proxy.c84
-rw-r--r--libs/libcurl/src/cf-haproxy.c1
-rw-r--r--libs/libcurl/src/cf-https-connect.c49
-rw-r--r--libs/libcurl/src/cf-socket.c586
-rw-r--r--libs/libcurl/src/cf-socket.h9
-rw-r--r--libs/libcurl/src/cfilters.c111
-rw-r--r--libs/libcurl/src/cfilters.h39
-rw-r--r--libs/libcurl/src/config-os400.h3
-rw-r--r--libs/libcurl/src/config-riscos.h3
-rw-r--r--libs/libcurl/src/config-win32.h7
-rw-r--r--libs/libcurl/src/conncache.c673
-rw-r--r--libs/libcurl/src/conncache.h48
-rw-r--r--libs/libcurl/src/connect.c102
-rw-r--r--libs/libcurl/src/connect.h19
-rw-r--r--libs/libcurl/src/content_encoding.c49
-rw-r--r--libs/libcurl/src/cookie.c77
-rw-r--r--libs/libcurl/src/cookie.h2
-rw-r--r--libs/libcurl/src/curl_addrinfo.c18
-rw-r--r--libs/libcurl/src/curl_addrinfo.h6
-rw-r--r--libs/libcurl/src/curl_config.h.cmake33
-rw-r--r--libs/libcurl/src/curl_config.h.in12
-rw-r--r--libs/libcurl/src/curl_des.c2
-rw-r--r--libs/libcurl/src/curl_endian.c6
-rw-r--r--libs/libcurl/src/curl_fnmatch.c2
-rw-r--r--libs/libcurl/src/curl_gethostname.c10
-rw-r--r--libs/libcurl/src/curl_multibyte.h7
-rw-r--r--libs/libcurl/src/curl_ntlm_core.c36
-rw-r--r--libs/libcurl/src/curl_ntlm_core.h7
-rw-r--r--libs/libcurl/src/curl_rtmp.c6
-rw-r--r--libs/libcurl/src/curl_sasl.c2
-rw-r--r--libs/libcurl/src/curl_setup.h81
-rw-r--r--libs/libcurl/src/curl_setup_once.h14
-rw-r--r--libs/libcurl/src/curl_sha512_256.c18
-rw-r--r--libs/libcurl/src/curl_sspi.c2
-rw-r--r--libs/libcurl/src/curl_threads.c13
-rw-r--r--libs/libcurl/src/curl_threads.h9
-rw-r--r--libs/libcurl/src/cw-out.c4
-rw-r--r--libs/libcurl/src/dict.c6
-rw-r--r--libs/libcurl/src/doh.c129
-rw-r--r--libs/libcurl/src/doh.h29
-rw-r--r--libs/libcurl/src/dynbuf.c4
-rw-r--r--libs/libcurl/src/dynhds.c2
-rw-r--r--libs/libcurl/src/dynhds.h36
-rw-r--r--libs/libcurl/src/easy.c28
-rw-r--r--libs/libcurl/src/easygetopt.c2
-rw-r--r--libs/libcurl/src/easyif.h2
-rw-r--r--libs/libcurl/src/easyoptions.c3
-rw-r--r--libs/libcurl/src/escape.c11
-rw-r--r--libs/libcurl/src/file.c62
-rw-r--r--libs/libcurl/src/fopen.c8
-rw-r--r--libs/libcurl/src/formdata.c20
-rw-r--r--libs/libcurl/src/formdata.h4
-rw-r--r--libs/libcurl/src/ftp.c186
-rw-r--r--libs/libcurl/src/ftp.h6
-rw-r--r--libs/libcurl/src/getenv.c6
-rw-r--r--libs/libcurl/src/getinfo.c14
-rw-r--r--libs/libcurl/src/gopher.c6
-rw-r--r--libs/libcurl/src/hash.c40
-rw-r--r--libs/libcurl/src/hash.h5
-rw-r--r--libs/libcurl/src/headers.c6
-rw-r--r--libs/libcurl/src/hmac.c2
-rw-r--r--libs/libcurl/src/hostip.c50
-rw-r--r--libs/libcurl/src/hostip.h8
-rw-r--r--libs/libcurl/src/hostip4.c22
-rw-r--r--libs/libcurl/src/hsts.c12
-rw-r--r--libs/libcurl/src/hsts.h4
-rw-r--r--libs/libcurl/src/http.c289
-rw-r--r--libs/libcurl/src/http.h11
-rw-r--r--libs/libcurl/src/http1.c2
-rw-r--r--libs/libcurl/src/http2.c160
-rw-r--r--libs/libcurl/src/http_aws_sigv4.c133
-rw-r--r--libs/libcurl/src/http_chunks.c11
-rw-r--r--libs/libcurl/src/http_chunks.h10
-rw-r--r--libs/libcurl/src/http_negotiate.c4
-rw-r--r--libs/libcurl/src/http_ntlm.c2
-rw-r--r--libs/libcurl/src/http_proxy.c1
-rw-r--r--libs/libcurl/src/idn.c84
-rw-r--r--libs/libcurl/src/imap.c18
-rw-r--r--libs/libcurl/src/inet_ntop.c17
-rw-r--r--libs/libcurl/src/inet_ntop.h7
-rw-r--r--libs/libcurl/src/inet_pton.c14
-rw-r--r--libs/libcurl/src/krb5.c24
-rw-r--r--libs/libcurl/src/ldap.c32
-rw-r--r--libs/libcurl/src/libcurl.rc2
-rw-r--r--libs/libcurl/src/macos.c4
-rw-r--r--libs/libcurl/src/md4.c148
-rw-r--r--libs/libcurl/src/md5.c179
-rw-r--r--libs/libcurl/src/memdebug.c14
-rw-r--r--libs/libcurl/src/memdebug.h5
-rw-r--r--libs/libcurl/src/mime.c30
-rw-r--r--libs/libcurl/src/mime.h4
-rw-r--r--libs/libcurl/src/mprintf.c8
-rw-r--r--libs/libcurl/src/mqtt.c22
-rw-r--r--libs/libcurl/src/multi.c610
-rw-r--r--libs/libcurl/src/multihandle.h24
-rw-r--r--libs/libcurl/src/multiif.h11
-rw-r--r--libs/libcurl/src/netrc.c4
-rw-r--r--libs/libcurl/src/netrc.h2
-rw-r--r--libs/libcurl/src/nonblock.c15
-rw-r--r--libs/libcurl/src/noproxy.c35
-rw-r--r--libs/libcurl/src/noproxy.h6
-rw-r--r--libs/libcurl/src/openldap.c4
-rw-r--r--libs/libcurl/src/parsedate.c14
-rw-r--r--libs/libcurl/src/pingpong.c8
-rw-r--r--libs/libcurl/src/pingpong.h4
-rw-r--r--libs/libcurl/src/pop3.c20
-rw-r--r--libs/libcurl/src/progress.c44
-rw-r--r--libs/libcurl/src/progress.h2
-rw-r--r--libs/libcurl/src/rand.c11
-rw-r--r--libs/libcurl/src/rename.c2
-rw-r--r--libs/libcurl/src/request.c38
-rw-r--r--libs/libcurl/src/request.h12
-rw-r--r--libs/libcurl/src/rtsp.c14
-rw-r--r--libs/libcurl/src/rtsp.h10
-rw-r--r--libs/libcurl/src/select.c168
-rw-r--r--libs/libcurl/src/select.h33
-rw-r--r--libs/libcurl/src/sendf.c76
-rw-r--r--libs/libcurl/src/sendf.h8
-rw-r--r--libs/libcurl/src/setopt.c149
-rw-r--r--libs/libcurl/src/setup-os400.h4
-rw-r--r--libs/libcurl/src/setup-vms.h2
-rw-r--r--libs/libcurl/src/setup-win32.h4
-rw-r--r--libs/libcurl/src/sha256.c32
-rw-r--r--libs/libcurl/src/share.c7
-rw-r--r--libs/libcurl/src/share.h2
-rw-r--r--libs/libcurl/src/smb.c4
-rw-r--r--libs/libcurl/src/smtp.c78
-rw-r--r--libs/libcurl/src/socketpair.c73
-rw-r--r--libs/libcurl/src/socketpair.h52
-rw-r--r--libs/libcurl/src/socks.c31
-rw-r--r--libs/libcurl/src/socks_gssapi.c12
-rw-r--r--libs/libcurl/src/socks_sspi.c12
-rw-r--r--libs/libcurl/src/splay.c12
-rw-r--r--libs/libcurl/src/splay.h2
-rw-r--r--libs/libcurl/src/strcase.c12
-rw-r--r--libs/libcurl/src/strerror.c43
-rw-r--r--libs/libcurl/src/strtok.c2
-rw-r--r--libs/libcurl/src/strtoofft.c16
-rw-r--r--libs/libcurl/src/strtoofft.h2
-rw-r--r--libs/libcurl/src/system_win32.c26
-rw-r--r--libs/libcurl/src/system_win32.h4
-rw-r--r--libs/libcurl/src/telnet.c26
-rw-r--r--libs/libcurl/src/tftp.c72
-rw-r--r--libs/libcurl/src/timediff.h2
-rw-r--r--libs/libcurl/src/timeval.c14
-rw-r--r--libs/libcurl/src/transfer.c249
-rw-r--r--libs/libcurl/src/transfer.h49
-rw-r--r--libs/libcurl/src/url.c239
-rw-r--r--libs/libcurl/src/url.h3
-rw-r--r--libs/libcurl/src/urlapi-int.h6
-rw-r--r--libs/libcurl/src/urlapi.c94
-rw-r--r--libs/libcurl/src/urldata.h123
-rw-r--r--libs/libcurl/src/vauth/cleartext.c8
-rw-r--r--libs/libcurl/src/vauth/cram.c2
-rw-r--r--libs/libcurl/src/vauth/digest.c22
-rw-r--r--libs/libcurl/src/vauth/digest_sspi.c14
-rw-r--r--libs/libcurl/src/vauth/krb5_gssapi.c6
-rw-r--r--libs/libcurl/src/vauth/krb5_sspi.c8
-rw-r--r--libs/libcurl/src/vauth/ntlm.c24
-rw-r--r--libs/libcurl/src/vauth/ntlm_sspi.c8
-rw-r--r--libs/libcurl/src/vauth/oauth2.c6
-rw-r--r--libs/libcurl/src/vauth/spnego_gssapi.c8
-rw-r--r--libs/libcurl/src/vauth/spnego_sspi.c29
-rw-r--r--libs/libcurl/src/vauth/vauth.c8
-rw-r--r--libs/libcurl/src/version.c15
-rw-r--r--libs/libcurl/src/version_win32.c14
-rw-r--r--libs/libcurl/src/vquic/curl_msh3.c11
-rw-r--r--libs/libcurl/src/vquic/curl_ngtcp2.c194
-rw-r--r--libs/libcurl/src/vquic/curl_osslq.c153
-rw-r--r--libs/libcurl/src/vquic/curl_quiche.c77
-rw-r--r--libs/libcurl/src/vquic/vquic-tls.c77
-rw-r--r--libs/libcurl/src/vquic/vquic-tls.h5
-rw-r--r--libs/libcurl/src/vquic/vquic.c102
-rw-r--r--libs/libcurl/src/vssh/libssh.c141
-rw-r--r--libs/libcurl/src/vssh/libssh2.c109
-rw-r--r--libs/libcurl/src/vssh/ssh.h6
-rw-r--r--libs/libcurl/src/vssh/wolfssh.c22
-rw-r--r--libs/libcurl/src/vtls/bearssl.c90
-rw-r--r--libs/libcurl/src/vtls/cipher_suite.c182
-rw-r--r--libs/libcurl/src/vtls/cipher_suite.h5
-rw-r--r--libs/libcurl/src/vtls/gtls.c498
-rw-r--r--libs/libcurl/src/vtls/gtls.h18
-rw-r--r--libs/libcurl/src/vtls/hostcheck.c2
-rw-r--r--libs/libcurl/src/vtls/hostcheck.h2
-rw-r--r--libs/libcurl/src/vtls/mbedtls.c325
-rw-r--r--libs/libcurl/src/vtls/openssl.c681
-rw-r--r--libs/libcurl/src/vtls/openssl.h5
-rw-r--r--libs/libcurl/src/vtls/rustls.c123
-rw-r--r--libs/libcurl/src/vtls/schannel.c336
-rw-r--r--libs/libcurl/src/vtls/schannel_int.h11
-rw-r--r--libs/libcurl/src/vtls/schannel_verify.c19
-rw-r--r--libs/libcurl/src/vtls/sectransp.c1421
-rw-r--r--libs/libcurl/src/vtls/vtls.c222
-rw-r--r--libs/libcurl/src/vtls/vtls.h10
-rw-r--r--libs/libcurl/src/vtls/vtls_int.h58
-rw-r--r--libs/libcurl/src/vtls/wolfssl.c648
-rw-r--r--libs/libcurl/src/vtls/wolfssl.h17
-rw-r--r--libs/libcurl/src/vtls/x509asn1.c34
-rw-r--r--libs/libcurl/src/vtls/x509asn1.h3
-rw-r--r--libs/libcurl/src/ws.c8
-rw-r--r--libs/libcurl/src/ws.h2
225 files changed, 13640 insertions, 9916 deletions
diff --git a/libs/libcurl/docs/CHANGES b/libs/libcurl/docs/CHANGES
index 5553b0a845..8ea9d27b12 100644
--- a/libs/libcurl/docs/CHANGES
+++ b/libs/libcurl/docs/CHANGES
@@ -6,6 +6,5764 @@
Changelog
+Version 8.9.0 (24 Jul 2024)
+
+Daniel Stenberg (24 Jul 2024)
+
+- RELEASE-NOTES: synced
+
+- THANKS: updates from the 8.9.0 release
+
+- curl_easy_escape.md: move historic details to HISTORY
+
+ Closes #14261
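+
+ For readers of that page, a minimal sketch of the documented
+ curl_easy_escape()/curl_free() usage (not part of this commit; error
+ handling trimmed):
+
+ ```c
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     /* a length of 0 makes libcurl use strlen() on the input */
+     char *out = curl_easy_escape(curl, "data to encode &?", 0);
+     if(out) {
+       printf("%s\n", out);
+       curl_free(out);           /* always release with curl_free() */
+     }
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```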
+
+- docs/libcurl: add to cleanup docs that their inputs go invalid
+
+ Reported-by: icy17 on github
+ Fixes #14248
+ Closes #14258
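+
+ A minimal sketch of the rule the docs now spell out, at the public API
+ level; the finish() helper name is invented for this illustration:
+
+ ```c
+ #include <curl/curl.h>
+
+ static void finish(CURL **curlp)
+ {
+   curl_easy_cleanup(*curlp);
+   *curlp = NULL;  /* the handle is invalid after cleanup; never reuse it */
+ }
+ ```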
+
+dependabot[bot] (23 Jul 2024)
+
+- GHA: bump github/codeql-action from 3.25.11 to 3.25.13
+
+ Bumps [github/codeql-action](https://github.com/github/codeql-action) from
+ 3.25.11 to 3.25.13.
+ - [Release notes](https://github.com/github/codeql-action/releases)
+ - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
+ - [Commits](https://github.com/github/codeql-action/compare/b611370bb5703a7efb587f9d136a52ea24c5c38c...2d790406f505036ef40ecba973cc774a50395aac)
+
+ ---
+ updated-dependencies:
+ - dependency-name: github/codeql-action
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+ ...
+
+ Signed-off-by: dependabot[bot] <support@github.com>
+ Closes #14255
+
+Stefan Eissing (23 Jul 2024)
+
+- conncache: connection shutdown, multi_socket handling
+
+ - implement the socket hash user/reader/writer processing also
+ for connections that are being shut down by the connection cache.
+ - split out handling of current vs. last pollset socket event handling
+ into a function available in other code parts
+ - add `shutdown_poll` pollset to `connectdata` struct so that changes
+ in the pollset can be recorded during shutdown. (The internal handle
+ cannot keep it since it might be used for many connections)
+
+ Reported-by: calvin2021y on github
+ Fixes #14252
+ Closes #14257
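+
+ A hypothetical sketch of the pollset-diffing idea, with types and names
+ (struct pollset, apply_pollset_change, notify) invented for illustration
+ only, not curl's internal API: only the difference between the last and
+ the current pollset is reported to the socket callback.
+
+ ```c
+ #include <curl/curl.h>
+
+ struct pollset {            /* invented for this sketch */
+   curl_socket_t fd;
+   int events;               /* a CURL_POLL_IN/CURL_POLL_OUT style mask */
+ };
+
+ static void apply_pollset_change(const struct pollset *last,
+                                  const struct pollset *now,
+                                  void (*notify)(curl_socket_t fd, int ev))
+ {
+   if(last->fd == now->fd) {
+     if(last->events != now->events)
+       notify(now->fd, now->events);  /* interest changed on same socket */
+   }
+   else {
+     if(last->fd != CURL_SOCKET_BAD)
+       notify(last->fd, 0);           /* drop interest in the old socket */
+     if(now->fd != CURL_SOCKET_BAD)
+       notify(now->fd, now->events);  /* register the new socket */
+   }
+ }
+ ```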
+
+Daniel Stenberg (22 Jul 2024)
+
+- tool_cb_prg: output "flying saucers" with leading carriage return
+
+ That is how the progress-bar is output, so when the progress-bar has
+ been shown at least once and the information is then reset, like for a
+ redirect, there can be a moment where the size goes from known to
+ unknown and the flying saucers are shown after a brief display of the
+ progress-bar.
+
+ This could previously leave accidental character leftovers on the right
+ side of the bar when using a narrow display.
+
+ Reported-by: Chris Webb
+ Fixes #14213
+ Closes #14246
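+
+ A minimal sketch of the carriage-return redraw convention referred to
+ above; draw_progress() is an illustrative stand-in, not the actual
+ tool_cb_prg code:
+
+ ```c
+ #include <stdio.h>
+
+ static void draw_progress(int percent)
+ {
+   int i;
+   fputc('\r', stderr);               /* rewind to the start of the line */
+   fputc('[', stderr);
+   for(i = 0; i < 50; i++)
+     fputc(i < percent / 2 ? '#' : ' ', stderr);
+   fprintf(stderr, "] %3d%%", percent);
+   fflush(stderr);                    /* no newline until the transfer ends */
+ }
+ ```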
+
+- lib: Curl_posttransfer => multi_posttransfer
+
+ Moved from transfer.c to multi.c as it was only used within multi.c
+
+ Made it void, as it returned a fixed return code that nothing checked.
+
+ Closes #14240
+
+- CURLOPT_SSL_VERIFYHOST.md: refresh
+
+ Move mentions of old behavior to the HISTORY section to make it easier
+ to read about modern behavior.
+
+ Added a MATCHING section.
+
+ Closes #14241
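+
+ For context, a minimal sketch of the modern libcurl options the page
+ describes; set_verifyhost() is just an illustrative helper:
+
+ ```c
+ #include <curl/curl.h>
+
+ static void set_verifyhost(CURL *curl, int verify)
+ {
+   /* 2L (the default) checks that the certificate matches the host name
+      in the URL, 0L disables the check entirely */
+   curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, verify ? 2L : 0L);
+   curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, verify ? 1L : 0L);
+ }
+ ```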
+
+- multi: do a final progress update on connect failure
+
+ To fix timing metrics, etc.
+
+ Co-authored-by: Justin Maggard
+ Fixes #14204
+ Closes #14239
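+
+ A minimal sketch of reading a timing metric after a failed transfer with
+ the public API; report_timing() is an illustrative helper, not part of
+ this change:
+
+ ```c
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ static void report_timing(CURL *curl, CURLcode result)
+ {
+   curl_off_t total_us = 0;
+   if(curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME_T, &total_us) == CURLE_OK)
+     fprintf(stderr, "result %d, total time: %ld us\n", (int)result,
+             (long)total_us);
+ }
+ ```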
+
+Orgad Shaneh (19 Jul 2024)
+
+- md4: fix compilation with OpenSSL 1.x with md4 disabled
+
+ If OpenSSL 1.x is used, and it is configured with md4 disabled,
+ OPENSSL_NO_MD4 is defined in opensslconf.h, but this header was not
+ included before checking for this define.
+
+ Later in md4.c, openssl/md4.h is included, and it includes that header
+ indirectly, leading to inconsistency within md4.c.
+
+ Since the md4.h branch was taken, wincrypt.h (and others) is not
+ included, so when the USE_WIN32_CRYPTO branch is taken further down,
+ the required types are not defined.
+
+ Closes #14218
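+
+ A hedged sketch of the general include-order pattern; the
+ LOCAL_USE_OPENSSL_MD4 macro is made up for illustration and is not the
+ actual patch:
+
+ ```c
+ /* make sure OpenSSL's configuration header is seen before its feature
+    macros are tested */
+ #include <openssl/opensslconf.h>  /* defines OPENSSL_NO_MD4 when disabled */
+
+ #ifndef OPENSSL_NO_MD4
+ #include <openssl/md4.h>
+ #define LOCAL_USE_OPENSSL_MD4 1   /* hypothetical marker macro */
+ #endif
+ ```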
+
+martinevsky (19 Jul 2024)
+
+- ftp: remove redundant null pointer check in loop condition
+
+ Closes #14234
+
+Justin Maggard (19 Jul 2024)
+
+- mbedtls: check version before getting tls version
+
+ mbedtls_ssl_get_version_number() was added in mbedtls 3.2.0. Check for
+ that version before using it.
+
+ Closes #14228
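+
+ A hedged sketch of such a version guard; tls_version_name() is an
+ illustrative helper, not the actual curl code:
+
+ ```c
+ #include <mbedtls/version.h>
+ #include <mbedtls/ssl.h>
+
+ static const char *tls_version_name(const mbedtls_ssl_context *ssl)
+ {
+ #if MBEDTLS_VERSION_NUMBER >= 0x03020000
+   /* the numeric query only exists since mbedTLS 3.2.0 */
+   switch(mbedtls_ssl_get_version_number(ssl)) {
+   case MBEDTLS_SSL_VERSION_TLS1_2: return "TLSv1.2";
+   case MBEDTLS_SSL_VERSION_TLS1_3: return "TLSv1.3";
+   default: return "unknown";
+   }
+ #else
+   return mbedtls_ssl_get_version(ssl); /* string form on older mbedTLS */
+ #endif
+ }
+ ```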
+
+martinevsky (19 Jul 2024)
+
+- urlapi: remove unused definition of HOST_BAD
+
+ Closes #14235
+
+Daniel Stenberg (19 Jul 2024)
+
+- curldown: fixups
+
+ - make DEFAULT sections less repetitive
+
+ - make historic mentions use HISTORY
+
+ - generate the protocols section on `# %PROTOCOLS%` instead of guessing
+ where to put it
+
+ - generate the availability section on `# %AVAILABILITY%` instead of
+ guessing where to put it
+
+ - make the protocols section more verbose
+
+ Closes #14227
+
+Tal Regev (19 Jul 2024)
+
+- GHA/windows: enable libssh in !ssl MSVC job
+
+ Closes #14232
+
+- GHA/windows: enable libidn2 in !ssl MSVC job
+
+ Closes #14200
+
+Viktor Szakats (19 Jul 2024)
+
+- GHA/macos: improve, fix gcc/llvm, add new test matrix
+
+ This PR began as an attempt to drop GCC support, after repeated reports
+ on fallouts when trying to use it on macOS.
+
+ Then it transformed into a 3-week project turning up the issues causing
+ the fallouts, ending up including llvm and all available Xcode / macOS
+ SDK, macOS runner image, build tools and compiler vendors and versions.
+ Accumulating 400 sub-commits.
+
+ I developed and tested all fixes under this PR, then merged them as
+ separate patches.
+
+ This PR retained CI jobs updates, extensively reworking and extending
+ them: [1]
+
+ At first it seemed GCC and the Apple SDK were "naturally" growing more
+ incompatible, as Apple added further non-standard features to their
+ headers. This is partly true, but reality is more complicated.
+
+ Besides some issues local to curl, there were bugs in Apple SDK
+ headers and Homebrew GCC builds, features missing in the old llvm
+ version pre-installed on GitHub CI runner images, and subtle
+ incompatibilities between GCC and llvm/clang when handling language
+ extensions.
+
+ The resulting compiler errors seldom pointed in a useful direction, and
+ internet searches were silent about these issues too. Thus, I had to
+ peel them off layer by layer, using trial and error, and by recognizing
+ patterns of failures across 150-200 build combinations. Exposing
+ configure logs and curl_config.h in the CI logs helped too.
+
+ 1. GCC header compatibility layer ("hack" as GCC calls it)
+
+ The toughest issue is GCC's built-in compatibility layer:
+ https://github.com/gcc-mirror/gcc/tree/master/fixincludes
+
+ This patch layer is further patched by a "Darwin compatibility" project
+ applied on top by Homebrew GCC via:
+ https://github.com/iains/gcc-12-branch
+ https://github.com/iains/gcc-13-branch
+ https://github.com/iains/gcc-14-branch
+
+ The hack layer is designed in a way that breaks more builds than it
+ fixes, esp. in context of GHA runners. The idea is to build GCC
+ specifically for the SDK for the target macOS version. The problem with
+ this approach is that the Xcode + SDK installed on the local/CI machine
+ often does not match with the SDK used on while building GCC on
+ Homebrew's build machines. In these cases the GCC compatibility layer
+ turns into an "uncompatibility" layer and consistently breaks builds.
+ curl cannot offer a fix for this, because the solution (I found) is to
+ patch the toolchain on the local machine. I implemented this for our CI
+ builds and curl-for-win. In other cases the user must do this patching
+ manually, or choose a compatible GCC + Xcode/SDK combination.
+
+ An upstream fix doesn't seem trivial either, because the issue is
+ ingrained in the compatibility layer's design. Offering an `-fapplesdk`
+ (or recognizing `-target`) option and/or fixing them within the compiler
+ would seem like a more robust option, and also how mainline llvm solves
+ this.
+
+ Here's a table summarizing the GCC + SDK combinations and curl build
+ failures: [2]
+
+ More info: https://github.com/curl/curl/issues/10356#issuecomment-2222734103
+
+ db135f8d7207b20d531e7e2100a49f3e16bdcfab #14119 macos: add workaround for
+ gcc, non-c-ares, IPv6, compile error
+ Ref: https://github.com/curl/curl-for-win/commit/e2db3c475f5981352e6e6a79854a255805b28deb
+ Ref: https://github.com/curl/curl-for-win/commit/f5c58d7fef78e972be33ca2355dcb42ba56622a6
+
+ 2. Homebrew GCC's `availability` extension
+
+ A recent minor Homebrew GCC upgrade caused major breakage. The "Darwin
+ compatibility" patch applied to GCC implemented the `availability`
+ compiler attribute in GCC. The Apple SDK detected this and enabled its
+ use, but it turns out GCC accepts compiler attributes with slightly
+ different rules than llvm/clang and than how the Apple SDK uses them,
+ breaking builds.
+
+ Affected Homebrew GCC versions are: 12.4.0, 13.3.0 and 14.1.0.
+
+ Possibly tracked here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108796
+ More info: https://github.com/llvm/llvm-project/issues/81767
+
+ Commit implementing the `availability` macro:
+ gcc-12: https://github.com/iains/gcc-12-branch/commit/fd5530b7cb0012bf4faeddd45e13054a1dfa6783
+ gcc-13: https://github.com/iains/gcc-13-branch/commit/cb7e4eca68cfc4763474e2eb0935a844458842a8
+ gcc-14: https://github.com/iains/gcc-14-branch/commit/ff62a108865a6403f5017380d7018250c1d3306f
+
+ That applied to Homebrew GCC (12.4.0):
+ https://github.com/Homebrew/homebrew-core/commit/b904223d9893f62bec2a8f7483bf5992747fc6c7#diff-89dd0b4176eca7fcc24b591943509bf8a8d6ea904d71e5dfcd6b78fed62fc574R44-R48
+
+ Ref: #13700
+ More info: https://github.com/curl/curl/pull/14091#issuecomment-2222703468
+
+ e91fcbac7d86292858718a0bfebad57978761af4 #14155 macos: undo `availability`
+ macro enabled by Homebrew gcc
+
+ 3. Proprietary Apple SDK macros
+
+ Apple SDK expects certain macros predefined by the compiler. Missing
+ them may cause odd issues. Mainline llvm is keeping up with Apple
+ clang, but it needs a fresh version, while the one installed on GitHub
+ runners is old (v15). I patched these in `lib/curl_setup.h`.
+
+ baa3270846b2a7307cbd0dd5c02c4e5f00e388dd #14134 build: fix llvm 16 or older +
+ Xcode 15 or newer, and gcc
+
+ 4. Apple SDK header bug
+
+ Without certain predefined macros, SDK headers can take a codepath where
+ it mis-defines its own `TARGET_OS_OSX` macro, which makes it break its
+ own headers later. I patched it in `lib/curl_setup.h`.
+
+ ff784af461175584c73e7e2b65af00b1a5a6f67f #14159 build: fix llvm 17 and older
+ + macOS SDK 14.4 and newer
+
+ 5. `TargetConditionals.h` requires `sys/types.h`
+
+ Fixed in curl. It caused feature-detection failures with autotools, and
+ could break builds in certain configurations.
+
+ e1f6192939c9c5ab2310b60bedf4c07d635193f6 #14130 configure: fix
+ `SystemConfiguration` detection
+
+ 6. Differences between autotools and CMake compiler options
+
+ Fixed it by syncing compiler warning options.
+
+ 59cadacfcc1d39472245979cdbd614c7a9af6f0d #14128 build: sync warning options
+ between autotools, cmake & compilers
+
+ 7. Differences between autotools and CMake dependency detection
+
+ Fixed it by improving detection of libidn2, with some more fixes
+ pending for the next feature window.
+
+ f43adc2c4978f7f82a359e89186e58a31d17b0ad #14137 cmake: detect `libidn2` also
+ via `pkg-config`
+ Ref: #14136 cmake: detect `nghttp2` via `pkg-config`, enable by default
+
+ 8. libidn2 detection bug with CMake
+
+ Fixed the root cause and also the trigger in the CI config.
+
+ 764fbabf6ed4c1d36c0ab2033ac8df52d9923cd7 #14175 cmake: fix builds with
+ detected libidn2 lib but undetected header
+
+ 9. Suppressed compiler warnings inside Apple-specific curl code
+
+ Fixed these warnings, which allowed to stop silencing them.
+
+ b05dc7eb3592305de9fa85640767f3dd2a8d4c93 #14122 sectransp: fix
+ `HAVE_BUILTIN_AVAILABLE` checks to not emit warnings
+ 5fa534b0dacdc120aaab0766047e0ecac37be4b3 #14162 sectransp: fix clang compiler
+ warnings, stop silencing them
+
+ 10. CMake mis-detecting a CA bundle path on macOS
+
+ d2ef6255f4a040397d2f40ab7cbf65a203201cd9 #14182 cmake: sync CA bundle/path
+ detection with autotools
+
+ 11. Failure to build tests with LibreSSL or wolfSSL with CMake
+
+ Fixed by dropping unnecessary includes rather than making test builds
+ dependent on dependency headers.
+
+ 3765d75ce47b66289f946382b649d0e99389dc77 #14172 cmake: fix building
+ `unit1600` due to missing `ssl/openssl.h`
+
+ 12. curl tests with CMake
+
+ curl's CMake was missing bits for running the C preprocessor accurately.
+ It made tests 1119 and 1167 fail. I implemented the missing bits.
+
+ efc2c5184d008fe2e5910fd03263e1ab0331d4e6 #14124 tests: include current
+ directory when running test Perl commands
+ c09db8b51b88ee6ad55bd637dcb4b47678e30906 #14129 cmake: create
+ `configurehelp.pm` like autotools does
+ 67cc1e3400b77536a3ca529c986247e1ef985e6e #14125 test1119: adapt for `.md`
+ input
+
+ 13. GCC missing `__builtin_available()` support
+
+ curl source code assumes this is available to enable certain codepaths.
+ It's also intermixed with monotonic timer support.
+
+ 14. Monotonic timer support with GCC
+
+ Detected by GCC, while it probably shouldn't be. llvm/clang detects it
+ depending on target OS version. I've been playing with this, but so far
+ without a conclusion or fix.
+
+ 15. Runtime/test failures with GCC
+
+ I couldn't find the reason for most of this. A bunch of RTSP tests fail
+ with GCC. SecureTransport + HTTP/2 is failing a bunch of tests. With
+ OpenSSL it fails two of those. SecureTransport builds also fail one DoH
+ test.
+
+ 16. Runtime/test failure in llvm/clang
+
+ AppleIDN support received a fix with two more remaining.
+
+ fd0250869f7837e4a48d7e6f54cc0801ad3820e8 #14179 #14176 IDN: fix ß with
+ AppleIDN
+
+ 17. Other issues found and fixed while working on this:
+
+ 2c15aa5765900d4351e733671a1c8c3785beee1a GHA/macos: delete misplaced
+ `CFLAGS`, drop redundant CMake option
+ 80fb7c0bef209735ab352bf4afa31193a7bc65f1 #14126 configure: limit
+ `SystemConfiguration` test to non-c-ares, IPv6 builds
+ cfd6f43d6ca7e57670b422bab7bbf10221a2cf3e #14127 build: tidy up
+ `__builtin_available` feature checks (Apple)
+ bae555359979016999a9425a2d489f219a78abdd #14174 runtests: show name and
+ keywords for failed tests in summary
+ 09cdf7e5315711dea4ce7dcf5d99a4d41e7f658b #14178 cmake: delete unused
+ `HAVE_LIBSSH2`, `HAVE_LIBSOCKET` macros
+ d3595c74fab829f07ef44da1b7fc2f5668767020 #14186 configure: CA bundle/path
+ detection fixes
+ 58772b0e082eda333e0a5fc8fb0bc7f17a3cd99c #14187 runtests: set
+ `SOURCE_DATE_EPOCH` to fix failing around midnight
+ 18f1cd7a77c4beecfd977d43f55634eb593ac99e #14183 tests: sync feature names
+ with `curl -V`
+ 4c22d97be786ed801e050da6872dd3143d6d0a59 #14181 build: use `#error` instead
+ of invalid syntax
+
+ Pending merges:
+
+ - #14185 runtests: fold test details for GitHub CI runs
+ - #14197 cmake: grab-bag of tidy-ups
+ - #14196 configure: limit `__builtin_available` test to Darwin
+
+ Summary:
+
+ In general GCC doesn't seem to be a good fit with curl and macOS for
+ now. These "lucky" combinations (GitHub Actions runner) will build out
+ of the box now: macos-14 + Xcode 15.0.1 + gcc-11, gcc-12, gcc-14. The
+ rest builds with the ugly workaround in place, but all this still leaves
+ some runtime issues.
+
+ More info and links in the commit messages and source code.
+
+ [1]: This PR:
+ - add info about target OS version requirements per feature, with OS
+ names and release years.
+ - stop using `-Wno-deprecated-declarations` to suppress warnings.
+ - use `LDFLAGS=-w` to suppress 'object file was built for newer macOS
+ version than being linked' warnings.
+ (there were tens of thousands of them in some jobs)
+ - allow overriding Xcode version in all jobs.
+ - improve job names.
+ - abbreviate CMake as CM, autotools as AM for more compact job names.
+ - shorten job names by using `!` instead of `no-` and `non-`.
+ - bump parallel tests to 10 (from 5).
+ - drop using `--enable-maintainer-mode` `./configure` option.
+ - add gcc-12 no-ssl, autotools job with tests, ignore failing test
+ results. (It's not yet clear why gcc-12 builds have different runtime
+ results than clang/llvm ones.)
+ - add comments with OS names and release years next to version numbers,
+ e.g. 10.15 # Catalina (2019)
+ - fix broken gcc-12 SecureTransport build.
+ - show compiler, Xcode, SDK, gcc hack SDK versions, Homebrew
+ preinstalled packages and C compiler predefined macros for each job.
+ Useful for debugging all the strange problems these builds might have.
+ - merge brew bundle and install steps.
+ - move step names to the top.
+ - dump configure log for both cmake and autotools also for successful
+ builds. Useful for debugging.
+ - dump curl_config.h in short (sorted #defines) and full form.
+ - add support for the mainline llvm compiler.
+ - set sysroot for gcc and llvm.
+ - add timeout for cmake jobs.
+ - add new job matrix: combinations
+ It supports building all possible compiler, runner image, Xcode/SDK
+ combinations, with cmake and autotools, target OS versions and with or
+ without SecureTransport. It's quick. GHA limits the maximum number of
+ matrix jobs at 256.
+ I used this as a test-rig to fix the macOS build fallouts with gcc and
+ llvm.
+ I settled with 16 jobs, trying to maximize fallout coverage.
+ - implement hack to make Homebrew gcc work with all available SDKs.
+ - add handy mini-table about Xcode / SDK versions, OS names, years for
+ each GHA images, with the defaults.
+ - add tests for cmake jobs.
+ - make cmake config hack to link GnuTLS less intrusive.
+ - stop ignoring test 1452, seems fine now.
+ - fix to enable libpsl in autotools builds.
+ - enable libpsl in cmake builds.
+ - add an llvm job with tests (both autotools and cmake).
+ - delete similar macOS jobs from Circle CI. GHA is now arm64 too.
+
+ [2]: Homebrew GCC vs GHA runner images vs curl builds:
+ ```
+ macOS     Xcode    gcc         gcc SDK hacks       Xcode SDK   SDK major  Build  Compile
+           (*def)   (Homebrew)  (CommandLineTools)              versions          error
+ --------  -------  ----------  ------------------  ----------  ---------  -----  -------
+ macos-12 13.1 GCC 11.4.0 MacOSX12 MacOSX12.0
+ macos-12 13.2.1 GCC 11.4.0 MacOSX12 MacOSX12.1
+ macos-12 13.3.1 GCC 11.4.0 MacOSX12 MacOSX12.3
+ macos-12 13.4.1 GCC 11.4.0 MacOSX12 MacOSX12.3
+ macos-12 14.0.1 GCC 11.4.0 MacOSX12 MacOSX12.3
+ macos-12 14.1 GCC 11.4.0 MacOSX12 MacOSX13.0 MISMATCH FAIL
+ /Applications/Xcode_14.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-12 *14.2 GCC 11.4.0 MacOSX12 MacOSX13.1 MISMATCH FAIL
+ /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-13 14.1 GCC 11.4.0 MacOSX13 MacOSX13.0
+ macos-13 14.2 GCC 11.4.0 MacOSX13 MacOSX13.1
+ macos-13 14.3.1 GCC 11.4.0 MacOSX13 MacOSX13.3
+ macos-13 *15.0.1 GCC 11.4.0 MacOSX13 MacOSX14.0 MISMATCH FAIL
+ /Applications/Xcode_15.0.1.app/Contents/Developer/Platforms/MacOSX.platform/
+ Developer/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown
+ type name 'dispatch_queue_t'
+ macos-13 15.1 GCC 11.4.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown ty
+ pe name 'dispatch_queue_t'
+ macos-13 15.2 GCC 11.4.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown ty
+ pe name 'dispatch_queue_t'
+ macos-14 14.3.1 GCC 11.4.0 MacOSX14 MacOSX13.3 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 *15.0.1 GCC 11.4.0 MacOSX14 MacOSX14.0
+ macos-14 15.1 GCC 11.4.0 MacOSX14 MacOSX14.2
+ macos-14 15.2 GCC 11.4.0 MacOSX14 MacOSX14.2
+ macos-14 15.3 GCC 11.4.0 MacOSX14 MacOSX14.4
+ macos-14 15.4 GCC 11.4.0 MacOSX14 MacOSX14.5
+ macos-14 16.0 GCC 11.4.0 MacOSX14 MacOSX15.0 MISMATCH FAIL
+ /opt/homebrew/Cellar/gcc@11/11.4.0/lib/gcc/11/gcc/aarch64-apple-darwin23/11/
+ include-fixed/stdio.h:83:8: error: unknown type name 'FILE'
+ macos-12 13.1 GCC 12.4.0 MacOSX12 MacOSX12.0
+ macos-12 13.2.1 GCC 12.4.0 MacOSX12 MacOSX12.1
+ macos-12 13.3.1 GCC 12.4.0 MacOSX12 MacOSX12.3
+ macos-12 13.4.1 GCC 12.4.0 MacOSX12 MacOSX12.3
+ macos-12 14.0.1 GCC 12.4.0 MacOSX12 MacOSX12.3
+ macos-12 14.1 GCC 12.4.0 MacOSX12 MacOSX13.0 MISMATCH FAIL
+ /Applications/Xcode_14.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-12 *14.2 GCC 12.4.0 MacOSX12 MacOSX13.1 MISMATCH FAIL
+ /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-13 14.1 GCC 12.4.0 MacOSX13 MacOSX13.0
+ macos-13 14.2 GCC 12.4.0 MacOSX13 MacOSX13.1
+ macos-13 14.3.1 GCC 12.4.0 MacOSX13 MacOSX13.3
+ macos-13 *15.0.1 GCC 12.4.0 MacOSX13 MacOSX14.0 MISMATCH FAIL
+ /Applications/Xcode_15.0.1.app/Contents/Developer/Platforms/MacOSX.platform/
+ Developer/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown
+ type name 'dispatch_queue_t'
+ macos-13 15.1 GCC 12.4.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown ty
+ pe name 'dispatch_queue_t'
+ macos-13 15.2 GCC 12.4.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:103:1: error: unknown ty
+ pe name 'dispatch_queue_t'
+ macos-14 14.3.1 GCC 12.4.0 MacOSX14 MacOSX13.3 MISMATCH
+ macos-14 *15.0.1 GCC 12.4.0 MacOSX14 MacOSX14.0
+ macos-14 15.1 GCC 12.4.0 MacOSX14 MacOSX14.2
+ macos-14 15.2 GCC 12.4.0 MacOSX14 MacOSX14.2
+ macos-14 15.3 GCC 12.4.0 MacOSX14 MacOSX14.4
+ macos-14 15.4 GCC 12.4.0 MacOSX14 MacOSX14.5
+ macos-14 16.0 GCC 12.4.0 MacOSX14 MacOSX15.0 MISMATCH FAIL
+ /opt/homebrew/Cellar/gcc@12/12.4.0/lib/gcc/12/gcc/aarch64-apple-darwin23/12/
+ include-fixed/stdio.h:83:8: error: unknown type name 'FILE'
+ macos-12 13.1 GCC 13.3.0 MacOSX12 MacOSX12.0
+ macos-12 13.2.1 GCC 13.3.0 MacOSX12 MacOSX12.1
+ macos-12 13.3.1 GCC 13.3.0 MacOSX12 MacOSX12.3
+ macos-12 13.4.1 GCC 13.3.0 MacOSX12 MacOSX12.3
+ macos-12 14.0.1 GCC 13.3.0 MacOSX12 MacOSX12.3
+ macos-12 14.1 GCC 13.3.0 MacOSX12 MacOSX13.0 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-12 *14.2 GCC 13.3.0 MacOSX12 MacOSX13.1 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-13 14.1 GCC 13.3.0 MacOSX13 MacOSX13.0
+ macos-13 14.2 GCC 13.3.0 MacOSX13 MacOSX13.1
+ macos-13 14.3.1 GCC 13.3.0 MacOSX13 MacOSX13.3
+ macos-13 *15.0.1 GCC 13.3.0 MacOSX13 MacOSX14.0 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-13 15.1 GCC 13.3.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-13 15.2 GCC 13.3.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 14.3.1 GCC 13.3.0 MacOSX14 MacOSX13.3 MISMATCH FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 *15.0.1 GCC 13.3.0 MacOSX14 MacOSX14.0 FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 15.1 GCC 13.3.0 MacOSX14 MacOSX14.2 FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 15.2 GCC 13.3.0 MacOSX14 MacOSX14.2 FAIL
+ /Users/runner/work/curl/curl/bld/lib/curl_config.h:792:19: error: two or mor
+ e data types in declaration specifiers
+ macos-14 15.3 GCC 13.3.0 MacOSX14 MacOSX14.4
+ macos-14 15.4 GCC 13.3.0 MacOSX14 MacOSX14.5
+ macos-14 16.0 GCC 13.3.0 MacOSX14 MacOSX15.0 MISMATCH FAIL
+ /opt/homebrew/Cellar/gcc@13/13.3.0/lib/gcc/13/gcc/aarch64-apple-darwin23/13/
+ include-fixed/stdio.h:83:8: error: unknown type name 'FILE'
+ macos-12 13.1 GCC 14.1.0 MacOSX12 MacOSX12.0
+ macos-12 13.2.1 GCC 14.1.0 MacOSX12 MacOSX12.1
+ macos-12 13.3.1 GCC 14.1.0 MacOSX12 MacOSX12.3
+ macos-12 13.4.1 GCC 14.1.0 MacOSX12 MacOSX12.3
+ macos-12 14.0.1 GCC 14.1.0 MacOSX12 MacOSX12.3
+ macos-12 14.1 GCC 14.1.0 MacOSX12 MacOSX13.0 MISMATCH FAIL
+ /Applications/Xcode_14.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-12 *14.2 GCC 14.1.0 MacOSX12 MacOSX13.1 MISMATCH FAIL
+ /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/os/object.h:275:1: error: expected ';' be
+ fore 'extern'
+ macos-13 14.1 GCC 14.1.0 MacOSX13 MacOSX13.0
+ macos-13 14.2 GCC 14.1.0 MacOSX13 MacOSX13.1
+ macos-13 14.3.1 GCC 14.1.0 MacOSX13 MacOSX13.3
+ macos-13 *15.0.1 GCC 14.1.0 MacOSX13 MacOSX14.0 MISMATCH FAIL
+ /Applications/Xcode_15.0.1.app/Contents/Developer/Platforms/MacOSX.platform/
+ Developer/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:70:1: error: type defa
+ ults to 'int' in declaration of 'DISPATCH_DECL_FACTORY_CLASS_SWIFT' [-Wimplic
+ it-int]
+ macos-13 15.1 GCC 14.1.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.1.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:70:1: error: type defaul
+ ts to 'int' in declaration of 'DISPATCH_DECL_FACTORY_CLASS_SWIFT' [-Wimplicit
+ -int]
+ macos-13 15.2 GCC 14.1.0 MacOSX13 MacOSX14.2 MISMATCH FAIL
+ /Applications/Xcode_15.2.app/Contents/Developer/Platforms/MacOSX.platform/De
+ veloper/SDKs/MacOSX.sdk/usr/include/dispatch/queue.h:70:1: error: type defaul
+ ts to 'int' in declaration of 'DISPATCH_DECL_FACTORY_CLASS_SWIFT' [-Wimplicit
+ -int]
+ macos-14 14.3.1 GCC 14.1.0 MacOSX14 MacOSX13.3 MISMATCH
+ macos-14 *15.0.1 GCC 14.1.0 MacOSX14 MacOSX14.0
+ macos-14 15.1 GCC 14.1.0 MacOSX14 MacOSX14.2
+ macos-14 15.2 GCC 14.1.0 MacOSX14 MacOSX14.2
+ macos-14 15.3 GCC 14.1.0 MacOSX14 MacOSX14.4
+ macos-14 15.4 GCC 14.1.0 MacOSX14 MacOSX14.5
+ macos-14 16.0 GCC 14.1.0 MacOSX14 MacOSX15.0 MISMATCH FAIL
+ /opt/homebrew/Cellar/gcc/14.1.0_1/lib/gcc/current/gcc/aarch64-apple-darwin23
+ /14/include-fixed/stdio.h:83:8: error: unknown type name 'FILE'
+ ```
+ Source: https://github.com/curl/curl/actions/runs/9883956647/job/27299564218
+
+ This commit fixes earlier commit
+ 1e75edd372868048c9f805ac4ca6d2cb5a88ff5a, reverted in
+ 41a7e0dcc9681afd91e066411bcee4f369c23366, where I cut the commit
+ message in half by accident. The patch itself is identical.
+
+ Closes #14097
+
+- Revert "GHA/macos: improve, fix gcc/llvm, add new test matrix"
+
+ This reverts commit 1e75edd372868048c9f805ac4ca6d2cb5a88ff5a.
+
+ Due to some parts of the commit message missing (my bad.)
+
+Daniel Stenberg (19 Jul 2024)
+
+- Revert "lib: send eos flag"
+
+ This reverts commit be93299f10ef0b2bf7fe5c82140120073831867a.
+
+Viktor Szakats (19 Jul 2024)
+
+- GHA/windows: use default shell CI feature
+
+ It makes repeating a line in each step unnecessary.
+
+ Closes #14206
+
+- GHA/macos: improve, fix gcc/llvm, add new test matrix
+
+ This PR began as an attempt to drop GCC support, after repeated reports
+ on fallouts when trying to use it on macOS.
+
+ Then it transformed into a 3-week project turning up the issues causing
+ the fallouts, ending up including llvm and all available Xcode / macOS
+ SDK, macOS runner image, build tools and compiler vendors and versions.
+ Accumulating 400 sub-commits.
+
+ I developed and tested all fixes under this PR, then merged them as
+ separate patches.
+
+ This PR retained CI jobs updates, extensively reworking and extending
+ them: [1]
+
+ At first it seemed GCC and the Apple SDK were "naturally" growing more
+ incompatible, as Apple added further non-standard features to their
+ headers. This is partly true, but reality is more complicated.
+
+ Besides some issues local to curl, there were bugs in Apple SDK
+ headers and Homebrew GCC builds, features missing in the old llvm
+ version pre-installed on GitHub CI runner images, and subtle
+ incompatibilities between GCC and llvm/clang when handling language
+ extensions.
+
+ The resulting compiler errors seldom pointed in a useful direction, and
+ internet searches were silent about these issues too. Thus, I had to
+ peel them off layer by layer, using trial and error, and by recognizing
+ patterns of failures across 150-200 build combinations. Exposing
+ configure logs and curl_config.h in the CI logs helped too.
+
+ 1. GCC header compatibility layer ("hack" as GCC calls it)
+
+ The toughest issue is GCC's built-in compatibility layer:
+ https://github.com/gcc-mirror/gcc/tree/master/fixincludes
+
+ This patch layer is further patched by a "Darwin compatibility" project
+ applied on top by Homebrew GCC via:
+ https://github.com/iains/gcc-12-branch
+ https://github.com/iains/gcc-13-branch
+ https://github.com/iains/gcc-14-branch
+
+ The hack layer is designed in a way that breaks more builds than it
+ fixes, esp. in context of GHA runners. The idea is to build GCC
+ specifically for the SDK for the target macOS version. The problem with
+ this approach is that the Xcode + SDK installed on the local/CI machine
+ often does not match with the SDK used on while building GCC on
+ Homebrew's build machines. In these cases the GCC compatibility layer
+ turns into an "uncompatibility" layer and consistently breaks builds.
+ curl cannot offer a fix for this, because the solution (I found) is to
+ patch the toolchain on the local machine. I implemented this for our CI
+ builds and curl-for-win. In other cases the user must do this patching
+ manually, or choose a compatible GCC + Xcode/SDK combination.
+
+ An upstream fix doesn't seem trivial either, because the issue is
+ ingrained in the compatibility layer's design. Offering an `-fapplesdk`
+ (or recognizing `-target`) option and/or fixing them within the compiler
+ would seem like a more robust option, and also how mainline llvm solves
+ this.
+
+ Here's a table summarizing the GCC + SDK combinations and curl build
+ failures: [2]
+
+ More info: https://github.com/curl/curl/issues/10356#issuecomment-2222734103
+
+ db135f8d7207b20d531e7e2100a49f3e16bdcfab #14119 macos: add workaround for gcc
+ , non-c-ares, IPv6, compile error
+ Ref: https://github.com/curl/curl-for-win/commit/e2db3c475f5981352e6e6a79854a
+ 255805b28deb
+ Ref: https://github.com/curl/curl-for-win/commit/f5c58d7fef78e972be33ca2355dc
+ b42ba56622a6
+
+ 2. Homebrew GCC's `availability` extension
+
+ A recent minor Homebrew GCC upgrade caused major breakage. The "Darwin
+ compatibility" patch applied to GCC implemented the `availability`
+ compiler attribute in GCC. The Apple SDK detected this and enabled its
+ use, but it turns out GCC accepts compiler attributes with slightly
+ different rules than llvm/clang and than how the Apple SDK uses them,
+ breaking builds.
+
+ Affected Homebrew GCC versions are: 12.4.0, 13.3.0 and 14.1.0.
+
+ Possibly tracked here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108796
+ More info: https://github.com/llvm/llvm-project/issues/81767
+
+ Commit implementing the `availability` macro:
+ gcc-12: https://github.com/iains/gcc-12-branch/commit/fd5530b7cb0012bf4faeddd
+ 45e13054a1dfa6783
+ gcc-13: https://github.com/iains/gcc-13-branch/commit/cb7e4eca68cfc4763474e2e
+ b0935a844458842a8
+ gcc-14: https://github.com/iains/gcc-14-branch/commit/ff62a108865a6403f501738
+ 0d7018250c1d3306f
+
+ That applied to Homebrew GCC (12.4.0):
+ https://github.com/Homebrew/homebrew-core/commit/b904223d9893f62bec2a8f7483bf
+ 5992747fc6c7#diff-89dd0b4176eca7fcc24b591943509bf8a8d6ea904d71e5dfcd6b78fed62
+ fc574R44-R48
+
+ Ref: #13700
+ More info: https://github.com/curl/curl/pull/14091#issuecomment-2222703468
+
+ e91fcbac7d86292858718a0bfebad57978761af4 #14155 macos: undo `availability` ma
+ cro enabled by Homebrew gcc
+
+ 3. Proprietary Apple SDK macros
+
+ Apple SDK expects certain macros predefined by the compiler. Missing
+ them may cause odd issues. Mainline llvm is keeping up with Apple
+ clang, but it needs a fresh version, while the one installed on GitHub
+ runners is old (v15). I patched these in `lib/curl_setup.h`.
+
+ baa3270846b2a7307cbd0dd5c02c4e5f00e388dd #14134 build: fix llvm 16 or older +
+ Xcode 15 or newer, and gcc
+
+ 4. Apple SDK header bug
+
+ Without certain predefined macros, SDK headers can take a codepath where
+ it mis-defines its own `TARGET_OS_OSX` macro, which makes it break its
+ own headers later. I patched it in `lib/curl_setup.h`.
+
+ ff784af461175584c73e7e2b65af00b1a5a6f67f #14159 build: fix llvm 17 and older
+ + macOS SDK 14.4 and newer
+
+ 5. `TargetConditionals.h` requires `sys/types.h`
+
+ Fixed in curl. It caused feature-detection failures with autotools, and
+ could break builds in certain configurations.
+
+ e1f6192939c9c5ab2310b60bedf4c07d635193f6 #14130 configure: fix
+ `SystemConfiguration` detection
+
+ 6. Differences between autotools and CMake compiler options
+
+ Fixed it by syncing compiler warning options.
+
+ 59cadacfcc1d39472245979cdbd614c7a9af6f0d #14128 build: sync warning options b
+ etween autotools, cmake & compilers
+
+ 7. Differences between autotools and CMake dependency detection
+
+ Fixed it by improving detection of libidn2, with some more fixes
+ pending for the next feature window.
+
+ f43adc2c4978f7f82a359e89186e58a31d17b0ad #14137 cmake: detect `libidn2` also
+ via `pkg-config`
+ Ref: #14136 cmake: detect `nghttp2` via `pkg-config`, enable by default
+
+ 8. libidn2 detection bug with CMake
+
+ Fixed the root cause and also the trigger in the CI config.
+
+ 764fbabf6ed4c1d36c0ab2033ac8df52d9923cd7 #14175 cmake: fix builds with detect
+ ed libidn2 lib but undetected header
+
+ 9. Suppressed compiler warnings inside Apple-specific curl code
+
+ Fixed these warnings, which allowed to stop silencing them.
+
+ b05dc7eb3592305de9fa85640767f3dd2a8d4c93 #14122 sectransp: fix `HAVE_BUILTIN_
+ AVAILABLE` checks to not emit warnings
+ 5fa534b0dacdc120aaab0766047e0ecac37be4b3 #14162 sectransp: fix clang compiler
+ warnings, stop silencing them
+
+ 10. CMake mis-detecting a CA bundle path on macOS
+
+ d2ef6255f4a040397d2f40ab7cbf65a203201cd9 #14182 cmake: sync CA bundle/path de
+ tection with autotools
+
+ 11. Failure to build tests with LibreSSL or wolfSSL with CMake
+
+ Fixed by dropping unnecessary includes rather than making test builds
+ dependent on dependency headers.
+
+ 3765d75ce47b66289f946382b649d0e99389dc77 #14172 cmake: fix building
+ `unit1600` due to missing `ssl/openssl.h`
+
+ 12. curl tests with CMake
+
+ curl's CMake was missing bits for running the C preprocessor accurately.
+ It made tests 1119 and 1167 fail. I implemented the missing bits.
+
+ efc2c5184d008fe2e5910fd03263e1ab0331d4e6 #14124 tests: include current direct
+ ory when running test Perl commands
+ c09db8b51b88ee6ad55bd637dcb4b47678e30906 #14129 cmake: create `configurehelp.
+ pm` like autotools does
+ 67cc1e3400b77536a3ca529c986247e1ef985e6e #14125 test1119: adapt for `.md` inp
+ ut
+
+ 13. GCC missing `__builtin_available()` support
+
+ curl source code assumes this is available to enable certain codepaths.
+ It's also intermixed with monotonic timer support.
+
+ 14. Monotonic timer support with GCC
+
+ Detected by GCC, while it probably shouldn't be. llvm/clang detects it
+ depending on target OS version. I've been playing with this, but so far
+ without a conclusion or fix.
+
+ 15. Runtime/test failures with GCC
+
+ I couldn't find the reason for most of this. A bunch of RTSP tests fail
+ with GCC. SecureTransport + HTTP/2 is failing a bunch of tests. With
+ OpenSSL it fails two of those. SecureTransport builds also fail one DoH
+ test.
+
+ 16. Runtime/test failure in llvm/clang
+
+ AppleIDN support received a fix with two more remaining.
+
+ fd0250869f7837e4a48d7e6f54cc0801ad3820e8 #14179 #14176 IDN: fix ß with Apple
+ IDN
+
+ 17. Other issues found and fixed while working on this:
+
+ 2c15aa5765900d4351e733671a1c8c3785beee1a GHA/macos: delete misplaced `
+ CFLAGS`, drop redundant CMake option
+ 80fb7c0bef209735ab352bf4afa31193a7bc65f1 #14126 configure: limit `SystemConfi
+ guration` test to non-c-ares, IPv6 builds
+ cfd6f43d6ca7e57670b422bab7bbf10221a2cf3e #14127 build: tidy up `__builtin_ava
+ ilable` feature checks (Apple)
+ bae555359979016999a9425a2d489f219a78abdd #14174 runtests: show name and keywo
+ rds for failed tests in summary
+ 09cdf7e5315711dea4ce7dcf5d99a4d41e7f658b #14178 cmake: delete unused `HAVE_LI
+ BSSH2`, `HAVE_LIBSOCKET` macros
+ d3595c74fab829f07ef44da1b7fc2f5668767020 #14186 configure: CA bundle/path det
+ ection fixes
+ 58772b0e082eda333e0a5fc8fb0bc7f17a3cd99c #14187 runtests: set `SOURCE_DATE_EP
+ OCH` to fix failing around midnight
+ 18f1cd7a77c4beecfd977d43f55634eb593ac99e #14183 tests: sync feature names wit
+ h `curl -V`
+ 4c22d97be786ed801e050da6872dd3143d6d0a59 #14181 build: use `#error` instead o
+ f invalid syntax
+
+ Pending merge:
+
+Daniel Stenberg (19 Jul 2024)
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (18 Jul 2024)
+
+- lib: send eos flag
+
+ Adds a `bool eos` flag to send methods to indicate that the data is the
+ last chunk the involved transfer wants to send to the server.
+
+ This will help protocol filters like HTTP/2 and 3 forward the stream's
+ EOF flag and also allows such calls to return EAGAIN when buffers are
+ not yet fully flushed.
+
+ Closes #14220
+
+Bhanu Prakash (18 Jul 2024)
+
+- mbedtls: correct the error message for cert blob parsing failure
+
+ Closes #14224
+
+Daniel Stenberg (18 Jul 2024)
+
+- curldown: make 'added-in:' a mandatory header field
+
+ - generate AVAILABILITY manpage sections automatically - for consistent
+ wording
+
+ - allows us to double-check against other documentation
+ (symbols-in-versions etc)
+
+ - enables proper automation/scripting based on this data
+
+ - lots of them were wrong or missing in the manpages
+
+ - several of them repeated (sometimes mismatching) backend support info
+
+ Add test 1488 to verify "added-in" version numbers against
+ symbols-in-versions.
+
+ Closes #14217
+
+Stefan Eissing (18 Jul 2024)
+
+- doh: fix cleanup
+
+ When removing an easy handle that had DoH sub-easy handles going, those
+ were not removed from the multi handle. Their memory was reclaimed on
+ curl_easy_cleanup() of the owning handle, but multi still had them in
+ their list.
+
+ Add `Curl_doh_close()` and `Curl_doh_cleanup()` as common point for
+ handling the DoH resource management. Use the `multi` present in the doh
+ handles (if so), for removal, as the `data->multi` might already have
+ been NULLed at this time.
+
+ Reported-by: 罗朝辉
+ Fixes #14207
+ Closes #14212
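+
+ At the public API level the matching ownership rule looks like this
+ minimal sketch (remove_and_cleanup() is just an illustrative helper):
+
+ ```c
+ #include <curl/curl.h>
+
+ static void remove_and_cleanup(CURLM *multi, CURL *easy)
+ {
+   /* detach first so the multi handle keeps no stale entry */
+   curl_multi_remove_handle(multi, easy);
+   curl_easy_cleanup(easy);
+ }
+ ```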
+
+Daniel Stenberg (18 Jul 2024)
+
+- tests/scripts: call it 'manpage' (single word)
+
+ Mostly in comments
+
+ Closes #14216
+
+Alex Snast (18 Jul 2024)
+
+- http/3: resume upload on ack if we have more data to send
+
+ Currently we're waiting for sendbuf_len_in_flight to hit zero before
+ resuming upload which means we're blocking and waiting for _all_ acks to
+ arrive before sending more data. This causes significant delays especially
+ when ack delay is used on the server side.
+
+ The fix addresses several issues in h3 over ngtcp2:
+ - On ack we now call nghttp3_conn_resume_stream() when we have more
+ data to send.
+ - upload_left was incorrectly computed on CF_CTRL_DATA_DONE_SEND as
+ we need to subtract the amount of data we have in flight.
+ - Remove upload_blocked_len as the Curl_bufq_write call will do the
+ right thing when called from cf_ngtcp2_send.
+
+ Fixes #14198
+ Closes #14209
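+
+ A hedged sketch of the resume idea around the real
+ nghttp3_conn_resume_stream() call; on_stream_data_acked() and its
+ more_data_pending flag are invented for illustration:
+
+ ```c
+ #include <stdint.h>
+ #include <nghttp3/nghttp3.h>
+
+ static void on_stream_data_acked(nghttp3_conn *conn, int64_t stream_id,
+                                  int more_data_pending)
+ {
+   if(more_data_pending)
+     /* wake the stream so nghttp3 asks the read callback for more data */
+     nghttp3_conn_resume_stream(conn, stream_id);
+ }
+ ```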
+
+Daniel Stenberg (18 Jul 2024)
+
+- idn: make macidn fail before trying conversion if name too long
+
+ - double the max name length to 512 bytes
+
+ Closes #14215
+
+z2_ (18 Jul 2024)
+
+- idn: tweak buffer use when converting with macidn
+
+ Closes #14215
+
+Orgad Shaneh (18 Jul 2024)
+
+- lib: add failure reason on bind errors
+
+ Closes #14221
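+
+ A minimal sketch of how an application can surface such failure reasons
+ through the public API; perform_with_errors() is an illustrative helper:
+
+ ```c
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ static CURLcode perform_with_errors(CURL *curl)
+ {
+   CURLcode rc;
+   char errbuf[CURL_ERROR_SIZE];
+   errbuf[0] = '\0';
+   curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);
+   rc = curl_easy_perform(curl);
+   if(rc != CURLE_OK)
+     fprintf(stderr, "(%d) %s\n", rc,
+             errbuf[0] ? errbuf : curl_easy_strerror(rc));
+   return rc;
+ }
+ ```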
+
+Stefan Eissing (18 Jul 2024)
+
+- pytests: scorecard upload tests
+
+ - add upload tests to scorecard, invoke with
+ > python3 tests/http/scorecard.py -u h1|h2|h3
+ - add a reverse proxy setup from Caddy to httpd for
+ upload tests since Caddy does not have other PUT/POST handling
+ - add caddy tests in test_08 for POST/PUT
+ - increase read buffer in mod_curltest for larger reads
+
+ Closes #14208
+
+Viktor Szakats (18 Jul 2024)
+
+- runtests: fix newline glitch in FAIL details
+
+ Follow-up to bae555359979016999a9425a2d489f219a78abdd #14174
+
+- runtests: show name and keywords for failed tests in summary
+
+ Useful to see what the numbers listed in the `TESTFAIL:` and `IGNORED:`
+ lines mean. Also list test keywords to help catching failure patterns.
+
+ Example:
+ ```
+ FAIL 1034: 'HTTP over proxy with malformatted IDN host name' HTTP,
+ HTTP GET, HTTP proxy, IDN, FAILURE, config file
+ FAIL 1035: 'HTTP over proxy with too long IDN host name' HTTP,
+ HTTP GET, HTTP proxy, IDN, FAILURE
+
+ TESTFAIL: These test cases failed: 1034 1035
+ ```
+
+ Closes #14174
+
+Tal Regev (16 Jul 2024)
+
+- GHA/windows: add MSVC wolfSSL job with test
+
+ Fix the wolfssl.c file because of this warning/error:
+ ```
+ curl\lib\vtls\wolfssl.c(1017,42): error C2220: the following warning is
+ treated as an error [curl\bld\lib\libcurl_object.vcxproj]
+ curl\lib\vtls\wolfssl.c(1017,42): warning C4267: 'function': conversion from
+ 'size_t' to 'unsigned long', possible loss of data
+ [curl\bld\lib\libcurl_object.vcxproj]
+ ```
+
+ `size_t` in MSVC is different. Change it to `unsigned long` because the
+ last argument of `wolfSSL_ERR_error_string_n` is defined as
+ `unsigned long`.
+
+ Closes #14193
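+
+ A hedged sketch of the cast pattern; print_wolfssl_error() is an
+ illustrative helper and real builds normally include wolfssl/options.h
+ generated at configure time:
+
+ ```c
+ #include <stddef.h>
+ #include <wolfssl/options.h>
+ #include <wolfssl/ssl.h>
+
+ static void print_wolfssl_error(unsigned long err, char *buf, size_t buflen)
+ {
+   /* wolfSSL declares the size parameter as unsigned long, so cast the
+      size_t explicitly to keep 64-bit MSVC (warning C4267) quiet */
+   wolfSSL_ERR_error_string_n(err, buf, (unsigned long)buflen);
+ }
+ ```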
+
+Viktor Szakats (16 Jul 2024)
+
+- cmake: delete unused `HAVE_LIBSSH2`, `HAVE_LIBSOCKET` macros
+
+ - `HAVE_LIBSSH2`: unused in source. Not defined in CMake.
+
+ - `HAVE_LIBSOCKET`: unused in source. Used internally in CMake.
+
+ autotools sets them implicitly, so add them to the flag comparison
+ ignore-list.
+
+ Closes #14178
+
+- cmake: create `configurehelp.pm` like autotools does
+
+ Required by tests 1119 and 1167 to run a C preprocessor.
+
+ Tested OK: https://github.com/curl/curl/actions/runs/9915343826
+
+ Besides Apple, it also supports any gcc and clang builds, and MSVC.
+ For other platforms, it defaults to `cpp` (like autotools).
+
+ Follow-up to efc2c5184d008fe2e5910fd03263e1ab0331d4e6 #14124
+ Cherry-picked from #14097
+ Closes #14129
+
+- cmake: sync CA bundle/path detection with autotools
+
+ - skip the entire CA logic if no selected TLS backend supports CA
+ certs/bundles.
+ Follow-up to 082bb41311a832ae1b83bb8fe1dfdefcf4e68ea5 #2545
+
+ - sync bundle path detection logic with `./configure`.
+
+ - fix to not auto-detect CA bundle/path on Windows.
+
+ - fix to reflect that BearSSL has CA bundle support.
+
+ - show the detected bundle path (as with the cert bundle).
+
+ - tidy up CMake syntax, fix typos in comments.
+
+ Closes #14182
+
+- configure: CA bundle/path detection fixes
+
+ - fix to not auto-detect CA bundle/path on Windows.
+
+ - two checks missed BearSSL, but they were only run for supported
+ TLS backends anyway. Delete these redundant checks.
+
+ - fix typos in a comment nearby.
+
+ Follow-up to 082bb41311a832ae1b83bb8fe1dfdefcf4e68ea5 #2545
+ Closes #14186
+
+- runtests: set `SOURCE_DATE_EPOCH` to fix failing around midnight
+
+ To make sure that `managen` called by test 1706 uses the same date as
+ the test expects in the `%DATE` macro.
+
+ Before this patch when tests started running before UTC midnight and
+ reached test 1706 after, these dates were different and the test failed.
+
+ Follow-up to 0e73b69b3dd6d174226c60406d3c4266754d70f8
+ Fixes #14173
+ Closes #14187
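+
+ A minimal sketch of the reproducible-build convention involved, in C for
+ illustration; build_timestamp() is a made-up helper, not part of the
+ test harness:
+
+ ```c
+ #include <stdlib.h>
+ #include <time.h>
+
+ static time_t build_timestamp(void)
+ {
+   const char *sde = getenv("SOURCE_DATE_EPOCH");
+   /* honor the pinned epoch when set, fall back to the wall clock */
+   return sde ? (time_t)strtoll(sde, NULL, 10) : time(NULL);
+ }
+ ```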
+
+- GHA/windows: verify 1448 2046 2047 in IDN Unicode jobs
+
+ These IDN tests pass with Unicode and fail without.
+
+ Follow-up to cb22cfca69bded45bf7f9c72c8e6764990490f11 #14077
+ Closes #14188
+
+- tests: sync feature names with `curl -V`
+
+ Some feature names used in tests had minor differences compared to
+ the well-known ones from `curl -V`. This patch syncs them to make test
+ results easier to grok.
+
+ Closes #14183
+
+Stefan Eissing (15 Jul 2024)
+
+- sendf: fix CRLF conversion of input
+
+ When CRLF line end conversion was enabled (--crlf), input after the last
+ newline in the upload buffer was not sent, if the buffer contained a
+ newline.
+
+ Reported-by: vuonganh1993 on github
+ Fixes #14165
+ Closes #14169
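+
+ The libcurl option behind the curl tool's --crlf flag, as a minimal
+ sketch (enable_crlf_upload() is an illustrative helper):
+
+ ```c
+ #include <curl/curl.h>
+
+ static void enable_crlf_upload(CURL *curl)
+ {
+   curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
+   /* convert LF line endings to CRLF while sending the upload body */
+   curl_easy_setopt(curl, CURLOPT_CRLF, 1L);
+ }
+ ```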
+
+- test2600: disable on win32
+
+ - disable this test on WIN32 platforms. It uses the file descriptor '1'
+ as a valid socket without events. Not portable.
+ - reduce trace output somewhat on other runs
+
+ Fixes #14177
+ Reported-by: Viktor Szakats
+ Closes #14191
+
+- smtp: for starttls, do full upgrade
+
+ - make sure the TLS handshake after a successful STARTTLS command is
+ fully done before further sending/receiving on the connection.
+
+ Reported-by: tomy2105 on github
+ Fixes #14166
+ Closes #14190
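+
+ A minimal sketch of requesting that upgrade through the public API
+ (mail.example.com is a placeholder host):
+
+ ```c
+ #include <curl/curl.h>
+
+ static void require_smtp_starttls(CURL *curl)
+ {
+   curl_easy_setopt(curl, CURLOPT_URL, "smtp://mail.example.com/");
+   /* fail the transfer unless the STARTTLS upgrade fully succeeds */
+   curl_easy_setopt(curl, CURLOPT_USE_SSL, (long)CURLUSESSL_ALL);
+ }
+ ```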
+
+Daniel Stenberg (14 Jul 2024)
+
+- RELEASE-NOTES: synced
+
+Viktor Szakats (14 Jul 2024)
+
+- build: use `#error` instead of invalid syntax
+
+ It reduces configure log noise.
+
+ Follow-up to 20c1b2d75ee38189ffa75d21ed04108e1e0630ae #13287
+ Closes #14181
+
+Daniel Stenberg (14 Jul 2024)
+
+- libcurl-docs: make option lists alpha-sorted
+
+ The man pages for curl_easy_getinfo, curl_easy_setopt and
+ curl_multi_setopt now feature the lists of options alphabetically
+ sorted. Test 1139 verifies that they are.
+
+ The curl_multi_setopt page also got brief explanations of the listed
+ options.
+
+ Closes #14156
+
+Christian Schmitz (14 Jul 2024)
+
+- IDN: fix ß with AppleIDN
+
+ Add flags UIDNA_NONTRANSITIONAL_TO_ASCII and
+ UIDNA_NONTRANSITIONAL_TO_UNICODE to encode ß correctly.
+
+ It fixes test 165.
+
+ Reported-by: Viktor Szakats
+ Bug: #14176
+ Closes #14179
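+
+ A hedged sketch of the ICU uidna call pattern with these flags;
+ to_ascii() is an illustrative helper and error handling is simplified:
+
+ ```c
+ #include <unicode/uidna.h>
+
+ static int to_ascii(const char *host, char *out, int32_t outlen)
+ {
+   UErrorCode err = U_ZERO_ERROR;
+   UIDNAInfo info = UIDNA_INFO_INITIALIZER;
+   int32_t len;
+   UIDNA *idna = uidna_openUTS46(UIDNA_CHECK_BIDI |
+                                 UIDNA_NONTRANSITIONAL_TO_ASCII |
+                                 UIDNA_NONTRANSITIONAL_TO_UNICODE, &err);
+   if(U_FAILURE(err))
+     return -1;
+   /* nontransitional mapping keeps ß as an xn-- encoded form instead of
+      mapping it to "ss" first */
+   len = uidna_nameToASCII_UTF8(idna, host, -1, out, outlen, &info, &err);
+   uidna_close(idna);
+   return (U_SUCCESS(err) && !info.errors) ? (int)len : -1;
+ }
+ ```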
+
+Viktor Szakats (14 Jul 2024)
+
+- cmake: fix builds with detected libidn2 lib but undetected header
+
+ It caused IDN to appear in `curl-config`, `libidn2` to be referenced
+ from `libcurl.pc`, and the fallback to `pkg-config` detection to fail,
+ even though libidn2 was not actually used.
+
+ It came up in macOS CI builds after enabling cmake build tests. It
+ remained hidden for a while due to setting `-DUSE_APPLE_IDN=ON`.
+
+ (The half-detection of Homebrew libidn2 was the result of configuring
+ with `-DCMAKE_EXE_LINKER_FLAGS=-L$(brew --prefix)/lib`, to fix
+ linking GnuTLS that needs the `nettle` lib from the brew prefix.)
+
+ ```
+ FAIL 1014: [Compare curl --version with curl-config --features] curl-config
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9919357748/job/27405080722
+
+ Cherry-picked from #14097
+ Closes #14175
+
+- cmake: fix building `unit1600` due to missing `ssl/openssl.h`
+
+ In specific build configs, cmake failed to build test `unit1600`,
+ due to a missing OpenSSL (or wolfSSL) header.
+
+ The test code relies on `lib/curl_ntlm_core.h`, which in turn included
+  TLS library headers. But dependency header directories are not set up
+ in cmake for tests, because they should not normally be needed.
+
+ The issue was hidden in most builds because TLS headers are usually
+ found under the system prefix. One counterexample is macOS + Homebrew
+ LibreSSL builds, where OpenSSL is purposefully unlinked from there to
+ avoid a mixup with LibreSSL that resides under its own prefix. It was
+ also hidden in autotools, possibly because it sets up header directories
+ globally, tests included.
+
+ The actual bug however is that `lib/curl_ntlm_core.h` should not include
+ TLS headers. None of its internal users need it, and `curl_ntlm_core.c`
+  already includes them directly.
+
+ Fix it by deleting the TLS header includes from this internal header.
+
+ Fixes:
+ ```
+ In file included from curl/tests/unit/unit1600.c:27:
+ curl/lib/curl_ntlm_core.h:32:12: fatal error: 'openssl/ssl.h' file not found
+ # include <openssl/ssl.h>
+ ^~~~~~~~~~~~~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9912684737/job/27388041520#ste
+ p:12:1694
+
+ Follow-up to 48eb71ade41d4b37f416b643063cab846ac027a2 #10322
+ Cherry-picked from #14097
+ Closes #14172
+
+- sectransp: fix clang compiler warnings, stop silencing them
+
+ Fix `-Wpointer-bool-conversion` warnings with the method suggested by
+ both Apple clang and mainline llvm. This was already tried and dropped
+ in #1705 (in year 2017), but the issue reported there no longer
+ replicates.
+
+ Verified with Apple clang 14, llvm 15, llvm 18 and gcc 11, 14 that the
+ generated objects are bit by bit identical before and after this patch.
+
+ Also:
+  - stop silencing `-Wtautological-pointer-compare`. This warning does
+    not seem to appear anymore (with or without this patch), at least
+ with the tested compilers and SDKs (clang 13.1.6-16.0.0beta, llvm 15,
+ 18, gcc 11, 14) and minimum macOS target of 10.8. Older targets fail
+ to build curl with SecureTransport.
+
+ - silence `-Wunreachable-code` for clang only. Previously I applied it
+ also to GCC, by mistake.
+ Ref: https://github.com/curl/curl/pull/12331/commits/8d7172d20a48ebc6c1b1d9
+ 4a76e2c5fb19dd9bfa
+
+ Apple clang `-Wpointer-bool-conversion`:
+ ```
+ curl/lib/vtls/sectransp.c:1103:6: error: address of function 'SSLCreateContex
+ t' will always evaluate to 'true' [-Werror,-Wpointer-bool-conversion]
+ if(SSLCreateContext) { /* use the newer API if available */
+ ~~ ^~~~~~~~~~~~~~~~
+ curl/lib/vtls/sectransp.c:1103:6: note: prefix with the address-of operator t
+ o silence this warning
+ if(SSLCreateContext) { /* use the newer API if available */
+ ^
+ &
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9819538439/job/27113201384#ste
+ p:8:382
+
+ llvm `-Wpointer-bool-conversion`:
+ ```
+ curl/lib/vtls/sectransp.c:2663:8: error: address of function 'SSLCreateContex
+ t' will always evaluate to 'true' [-Werror,-Wpointer-bool-conversion]
+ if(SSLCreateContext)
+ ~~ ^~~~~~~~~~~~~~~~
+ curl/lib/vtls/sectransp.c:2663:8: note: prefix with the address-of operator t
+ o silence this warning
+ if(SSLCreateContext)
+ ^
+ &
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9819538439/job/27113200291#ste
+ p:8:417
+
+ gcc still needs `-Waddress` suppressed to avoid these:
+ ```
+ curl/lib/vtls/n/sectransp.c: In function 'getsubject':
+ curl/lib/vtls/n/sectransp.c:379:6: warning: the address of 'SecCertificateCop
+ yLongDescription' will always evaluate as 'true' [-Waddress]
+ 379 | if(&SecCertificateCopyLongDescription)
+ | ^
+ [...]
+ ```
+
+ Follow-up to 59cadacfcc1d39472245979cdbd614c7a9af6f0d #14128
+ Follow-up to af271ce9b9717ba289417e9cbb7f278c2a12f959 #1722
+ Follow-up to 2b7ce3f56dfede107113c6de7d0ca457109d3eda #1706
+ Cherry-picked from #14097
+ Closes #14162
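+
+  The shape of the change, shown with a hypothetical weak-imported
+  function standing in for the real SecureTransport symbols:
+  ```
+  /* hypothetical Apple weak import; may be NULL on older OS versions */
+  extern void newer_api(void) __attribute__((weak_import));
+
+  static void pick_api(void)
+  {
+    /* the address-of form keeps clang quiet; a plain `if(newer_api)`
+       draws -Wpointer-bool-conversion, while gcc warns with -Waddress */
+    if(&newer_api)
+      newer_api();
+  }
+  ```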
+
+- CI/circleci: config tidy-ups, bump up test parallelism
+
+ - bump parallel test for Linux jobs.
+ Credit-to: Dan Fandrich
+ Cherry-picked from #11510
+ - bump parallel test for macOS jobs.
+ - drop no longer necessary `-Wno-vla` option.
+ - fold long lines.
+ - drop `--enable-maintainer-mode` `./configure` option.
+ - replace a hard-coded prefix with `brew --prefix`.
+ - update documentation link.
+ - move `--enable-debug` in front.
+ - tidy up quotes.
+
+ Closes #14171
+
+- GHA/windows: re-add gsasl to MSVC jobs
+
+ Now that the package reached the CI runner image.
+
+ Follow-up to f99c08dba40307c07341013ff5f71fa8e3464ffc #14090
+ Follow-up to e26cbe20cbedbea0ca743dd33880517309315cb2 #13979
+
+ Closes #14170
+
+- tidy-up: adjust casing of project names
+
+  Mostly TLS/SSH project names.
+
+ Closes #14160
+
+Daniel Stenberg (12 Jul 2024)
+
+- ISSUE_TEMPLATE/docs: correct the field identifiers
+
+Stephen Farrell (12 Jul 2024)
+
+- doh: fix leak and zero-length HTTPS RR crash
+
+ This PR fixes a leak and a crash that can happen when curl encounters
+ bad HTTPS RR values in DNS. We're starting to do better testing of that
+ kind of thing and e.g. have published bad HTTPS RR values at
+ dodgy.test.defo.ie.
+
+ Closes #14151
+
+Daniel Stenberg (12 Jul 2024)
+
+- curl_global_init.md: polish the thread-safe wording
+
+ Since this has been thread-safe for two years now, few users actually
+ are hurt by the previous unsafe ways.
+
+ Closes #14158
+
+Viktor Szakats (12 Jul 2024)
+
+- GHA: FreeBSD 14.1, actions bump
+
+ - bump FreeBSD to 14.1
+
+ - update cross-platform-actions/action action to v0.25.0
+
+ Closes #14157
+ Closes #14164
+
+- build: fix llvm 17 and older + macOS SDK 14.4 and newer
+
+ Fixup faulty target macro initialization in macOS SDK since v14.4 (as of
+ 15.0 beta). The SDK target detection in `TargetConditionals.h` correctly
+ detects macOS, but fails to set the macro's old name `TARGET_OS_OSX`,
+ then continues to set it to a default value of 0. Other parts of the SDK
+ still rely on the old name, and with this inconsistency our builds fail
+ due to missing declarations. It happens when using mainline llvm older
+ than v18. Later versions fixed it by predefining these target macros,
+ avoiding the faulty dynamic detection. gcc is not affected (for now)
+ because it lacks the necessary dynamic detection features, so the SDK
+ falls back to a codepath that sets both the old and new macro to 1.
+
+  Also move the `TargetConditionals.h` include to the top, to make sure
+  it is also included for c-ares builds combined with SecureTransport or
+  other curl features that may use an Apple SDK.
+
+ Before this patch, affected build combinations (e.g. in GHA runners,
+ llvm@15 + Xcode 15.3, 15.4, 16.0 with their default SDKs +
+ SecureTransport) fail with:
+ ```
+ error: use of undeclared identifier 'noErr'
+ or 'SecCertificateCopyLongDescription'
+ or 'SecItemImportExportKeyParameters'
+ or 'SecExternalFormat'
+ or 'SecExternalItemType'
+ or 'SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION'
+ ```
+
+ Example:
+ ```
+ curl/lib/vtls/sectransp.c:311:18: error: use of undeclared identifier 'noErr'
+ OSStatus rtn = noErr;
+ ^
+ curl/lib/vtls/sectransp.c:379:7: error: use of undeclared identifier 'SecCert
+ ificateCopyLongDescription'
+ if(&SecCertificateCopyLongDescription)
+ ^
+ curl/lib/vtls/sectransp.c:381:7: error: call to undeclared function 'SecCerti
+ ficateCopyLongDescription'; ISO C99 and later do not support implicit functio
+ n declarations [-Werror,-Wimplicit-function-declaration]
+ SecCertificateCopyLongDescription(NULL, cert, NULL);
+ ^
+ curl/lib/vtls/sectransp.c:380:25: error: incompatible integer to pointer conv
+ ersion assigning to 'CFStringRef' (aka 'const struct __CFString *') from 'int
+ ' [-Wint-conversion]
+ server_cert_summary =
+ ^
+ [...]
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9893867519/job/27330135969#ste
+ p:10:22
+
+ llvm v18 patches implementing the predefined macros:
+ https://github.com/llvm/llvm-project/pull/74676
+ https://github.com/llvm/llvm-project/commit/6e1f19168bca7e3bd4eefda50ba03eac8
+ 441dbbf
+ https://github.com/llvm/llvm-project/pull/82833
+ https://github.com/llvm/llvm-project/commit/e5ed7b6e2fd368b722b6359556cd01258
+ 81e7638
+
+ Cherry-picked from #14097
+ Closes #14159
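+
+  One way to express that fixup as a preprocessor guard; a sketch only,
+  not necessarily the exact curl patch.
+  ```
+  #include <TargetConditionals.h>
+
+  /* the SDK detected "Apple, not iPhone-family" but left the macro at 0:
+     restore the expected value so dependent SDK headers keep working */
+  #if defined(TARGET_OS_MAC) && TARGET_OS_MAC && \
+      defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE && \
+      defined(TARGET_OS_OSX) && !TARGET_OS_OSX
+  #undef TARGET_OS_OSX
+  #define TARGET_OS_OSX 1
+  #endif
+  ```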
+
+- macos: undo `availability` macro enabled by Homebrew gcc
+
+ Homebrew gcc builds starting with 12.4.0, 13.3.0 and 14.1.0 enabled
+ the `availability` attribute.
+
+ This broke builds because the way the Apple SDK uses attributes (when
+  available) is incompatible with how gcc accepts them, causing these
+ errors:
+ ```
+ error: attributes should be specified before the declarator in a function d
+ efinition
+ error: expected ',' or '}' before
+ ```
+
+ Upstream commits implementing the `availability` macro:
+ gcc-12: https://github.com/iains/gcc-12-branch/commit/fd5530b7cb0012bf4faeddd
+ 45e13054a1dfa6783
+ gcc-13: https://github.com/iains/gcc-13-branch/commit/cb7e4eca68cfc4763474e2e
+ b0935a844458842a8
+ gcc-14: https://github.com/iains/gcc-14-branch/commit/ff62a108865a6403f501738
+ 0d7018250c1d3306f
+
+  The project above is a Darwin gcc compatibility pack that is applied
+ to Homebrew gcc builds.
+
+ This patch works by redefining the `availability` macro to an invalid
+ value, making `__has_attribute(availability)` checks fail, stopping
+ Apple SDK from inserting the incompatible attributes.
+
+ It also replaces the previous, local workaround for `lib/macos.c`.
+
+ Example with gcc 12.4.0 with macOS SDK 14.0 (Xcode 15.0.1):
+ ```
+ In file included from <path-to-sdk>/MacOSX14.0.sdk/System/Library/Frameworks/
+ CoreFoundation.framework/Headers/CoreFoundation.h:54,
+ from <path-to-sdk>/MacOSX14.0.sdk/System/Library/Frameworks/
+ SystemConfiguration.framework/Headers/SCDynamicStoreCopySpecific.h:30,
+ from /Users/runner/work/curl/curl/lib/macos.c:33,
+ from /Users/runner/work/curl/curl/build/lib/CMakeFiles/libcu
+ rl_shared.dir/Unity/unity_0_c.c:244:
+ <path-to-sdk>/MacOSX14.0.sdk/System/Library/Frameworks/CoreFoundation.framewo
+ rk/Headers/CFUserNotification.h:126:1: error: attributes should be specified
+ before the declarator in a function definition
+ 126 | CF_INLINE CFOptionFlags CFUserNotificationCheckBoxChecked(CFIndex i)
+ API_AVAILABLE(macos(10.0)) API_UNAVAILABLE(ios, watchos, tvos) {return ((CFOp
+ tionFlags)(1UL << (8 + i)));}
+ | ^~~~~~~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9787982387/job/27025351601?pr=
+ 14096#step:7:18
+
+ The gcc vs. llvm/clang incompatibility possibly tracked here upstream:
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108796
+ More info:
+ https://github.com/llvm/llvm-project/issues/81767
+ https://github.com/gcc-mirror/gcc/commit/8433baadec88e5f31fa141b6d78094e912
+ 56079d
+ https://discourse.llvm.org/t/changing-attribute-ast-printing-location-for-g
+ cc-compatibility/73215
+ https://reviews.llvm.org/D159362
+
+ Follow-up to db135f8d7207b20d531e7e2100a49f3e16bdcfab #14119
+ Ref: https://github.com/curl/curl/pull/14091#issuecomment-2222703468
+ Fixes #13700
+ Cherry-picked from #14097
+ Closes #14155
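+
+  A sketch of the described trick; the replacement token is just an
+  illustrative name, not the actual one used in curl.
+  ```
+  /* make __has_attribute(availability) expand to an unknown attribute
+     for Homebrew gcc, so the Apple SDK stops emitting attribute forms
+     that gcc cannot parse */
+  #if defined(__APPLE__) && defined(__GNUC__) && !defined(__clang__)
+  #define availability curl_disabled_availability_attribute
+  #endif
+  ```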
+
+Daniel Stenberg (11 Jul 2024)
+
+- ISSUE_TEMPLATE/docs: add a separate GitHub issue template for documentation
+
+ As such problems don't really fit the code related template
+
+ Closes #14161
+
+Dan Fandrich (11 Jul 2024)
+
+- DISTROS: add AlmaLinux package source link
+
+Viktor Szakats (11 Jul 2024)
+
+- GHA/windows: ignore FTP test results for old-mingw-w64 [ci skip]
+
+ Missed from previous commit. They are flaky here as well.
+
+ Follow-up to 0b81eccd22fb915aa6b679c0fd23a8a89332dc9e
+
+Daniel Stenberg (11 Jul 2024)
+
+- libcurl-easy.md: now *more* than 300 options
+
+ it previously said "almost 300".
+
+ Also cleaned up the language somewhat.
+
+ Closes #14153
+
+Martin Peck (10 Jul 2024)
+
+- MANUAL.md: wrap two example urls that overrun styling
+
+ Closes #14149
+
+renovate[bot] (10 Jul 2024)
+
+- GHA: update wolfSSL and mod_h2
+
+ - wolfSSL/wolfssl to v5.7.2
+ - icing/mod_h2 to v2.0.29
+
+ Closes #14131
+ Closes #14148
+
+Dominik Piątkowski (10 Jul 2024)
+
+- docs: start markdown headers with capital letter where applicable
+
+ Closes #14115
+
+CMD (10 Jul 2024)
+
+- hostip: skip error check for infallible function call
+
+ Closes #14147
+
+Daniel Stenberg (10 Jul 2024)
+
+- cf-socket: remove two "useless" assignments
+
+ 'nread' is already -1, no need to assign it again
+
+ Pointed out by CodeSonar
+
+ Closes #14145
+
+Viktor Szakats (10 Jul 2024)
+
+- cmake: detect `libidn2` also via `pkg-config`
+
+ Also:
+ - GHA/non-native: install `pkg-config` to detect libidn2 with cmake
+ on NetBSD and FreeBSD.
+  - GHA/non-native: tidy-up `curl --version` command while here.
+
+ Cherry-picked from #14097
+ Closes #14137
+
+- build: fix llvm 16 or older + Xcode 15 or newer, and gcc
+
+ Xcode v15 (2023) or newer requires the built-in macro
+ `__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__`. This macro is missing from
+ mainline llvm versions released earlier. llvm v17 introduced it here:
+ https://github.com/llvm/llvm-project/commit/c8e2dd8c6f490b68e41fe663b44535a8a
+ 21dfeab
+
+ This patch defines the missing macro when the necessary conditions
+ align, by using the value via the macro's old name.
+
+ The issue affected SecureTransport builds: The SecureTransport code,
+ `lib/md4.c` and `lib/md5.c`.
+
+  Existing gcc versions (as of v14) also don't define this macro, so apply
+  the patch to it as well, even though gcc is incompatible in other ways
+  and this isn't fixing an actual curl build case that I could find yet.
+
+ GHA macOS runner images have llvm v15 pre-installed, which broke builds
+ when building with an affected Xcode:
+ ```
+ curl/lib/md4.c:80:14: error: '__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__' is not
+ defined, evaluates to 0 [-Werror,-Wundef]
+ (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \
+ ^
+ /Applications/Xcode_15.1.app/Contents/Developer/Platforms/MacOSX.platform/Dev
+ eloper/SDKs/MacOSX14.2.sdk/usr/include/AvailabilityInternal.h:40:53: note: ex
+ panded from macro '__MAC_OS_X_VERSION_MIN_REQUIRED'
+ #define __MAC_OS_X_VERSION_MIN_REQUIRED __ENVIRONMENT_OS_VERSION_
+ MIN_REQUIRED__
+ ^
+ In file included from curl/build/lib/CMakeFiles/libcurl_shared.dir/Unity/unit
+ y_0_c.c:250:
+ curl/lib/md5.c:75:14: error: '__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__' is not
+ defined, evaluates to 0 [-Werror,-Wundef]
+ (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \
+ ^
+ /Applications/Xcode_15.1.app/Contents/Developer/Platforms/MacOSX.platform/Dev
+ eloper/SDKs/MacOSX14.2.sdk/usr/include/AvailabilityInternal.h:40:53: note: ex
+ panded from macro '__MAC_OS_X_VERSION_MIN_REQUIRED'
+ #define __MAC_OS_X_VERSION_MIN_REQUIRED __ENVIRONMENT_OS_VERSION_
+ MIN_REQUIRED__
+ ^
+ 2 errors generated.
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9811974634/job/27095218578#ste
+ p:4:20
+
+ Cherry-picked from #14097
+ Closes #14134
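+
+  The workaround boils down to something like this (a sketch, not the
+  verbatim curl change):
+  ```
+  /* supply the name newer SDK headers expect, reusing the value of the
+     older macOS-specific macro the compiler does predefine */
+  #if defined(__APPLE__) && \
+      !defined(__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__) && \
+      defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__)
+  #define __ENVIRONMENT_OS_VERSION_MIN_REQUIRED__ \
+    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
+  #endif
+  ```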
+
+- build: tidy up `__builtin_available` feature checks (Apple)
+
+ - sync detection snippet between autotools and cmake
+ It wasn't causing issues, but it's useful to avoid unnecessary
+ differences while debugging.
+
+ - cmake: limit check to `APPLE`.
+
+ Ref: b05dc7eb3592305de9fa85640767f3dd2a8d4c93 #14122
+ Cherry-picked from #14097
+ Closes #14127
+
+- configure: limit `SystemConfiguration` test to non-c-ares, IPv6 builds
+
+ The framework this check detects is necessary for the function
+ `SCDynamicStoreCopyProxies()` used in `lib/macos.c`. Non-c-ares,
+ IPv6-enabled builds touch this codepath.
+
+  Limit the feature check to builds that actually need it.
+
+ It brings this in sync with CMake which already worked this way.
+
+ Cherry-picked from #14097
+ Closes #14126
+
+- configure: fix `SystemConfiguration` detection
+
+ Before this patch, `SystemConfiguration` detection failed due to this
+ error when compiling the detection snippet:
+ ```
+ /Applications/Xcode_15.3.app/Contents/Developer/Platforms/MacOSX.platform/Dev
+ eloper/SDKs/MacOSX.sdk/usr/include/TargetConditionals.h:140:50: error: missin
+ g binary operator before token "("
+ 140 | #if !defined(__has_extension) || !__has_extension(define_target_os_ma
+ cros)
+ | ^
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9821817534/job/27117929218#ste
+ p:6:1079
+
+  It occurred with gcc-11 when combined with macOS SDK 14.4 and 14.5
+ (default SDKs in Xcode 15.3 and 15.4 respectively). It did not happen
+ with earlier releases.
+
+ Despite the failure in `./configure`, `lib/macos.c` compiled with
+ Apple's `TargetConditionals.h` just fine.
+
+ Turns out that including the `sys/types.h` header before the SDK
+ header fixes the error and makes the detection snippet compile.
+
+ Cherry-picked from #14097
+ Closes #14130
+
+- build: sync warning options between autotools, cmake & compilers
+
+ - cmake: enable Apple-specific `-Werror=partial-availability` to match
+ autotools.
+
+ - autotools: enable `-pedantic-errors` with llvm/clang to match gcc and
+ CMake.
+
+ - autotools: enable `-Werror-implicit-function-declaration` for
+ llvm/clang to match gcc.
+
+ - cmake: enable `-Werror-implicit-function-declaration` to match
+ autotools.
+
+ - move `-Wpointer-bool-conversion` from autotools to the local file
+    (`sectransp.c`) it was meant to apply to. This way it applies to all
+ build methods.
+
+  - autotools: show `CURL_CFLAG_EXTRAS` in the `./configure` summary.
+    (it may contain `-Werror` and/or `-pedantic-errors`.)
+
+ Cherry-picked from #14097
+ Closes #14128
+
+- CI: simplify running curl with DLLs
+
+ - update `PATH` instead of copying DLLs around.
+ - drop redundant `export` from `export PATH`.
+ - delete ending pathseps.
+
+ Closes #14143
+
+Alex Snast (9 Jul 2024)
+
+- wolfssl: use larger error buffer when formatting errors
+
+  Currently we're using WOLFSSL_MAX_ERROR_SZ to define the error buffer
+  size. This value is user defined, which means it can be overridden with
+  -DWOLFSSL_MAX_ERROR_SZ=512 when building wolfssl, and this override is
+  not exported to the users of wolfssl.
+
+  Instead of relying on WOLFSSL_MAX_ERROR_SZ we'll just use a 256-byte
+  error buffer and use wolfSSL_ERR_error_string_n to fill it, thus
+  dropping the dependency on WOLFSSL_MAX_ERROR_SZ altogether.
+
+ Closes #14114
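+
+  Roughly the resulting pattern (an illustration, not the exact curl
+  code):
+  ```
+  #include <stdio.h>
+  #include <wolfssl/options.h>  /* wolfSSL build options, if available */
+  #include <wolfssl/ssl.h>
+
+  static void log_tls_error(unsigned long code)
+  {
+    /* fixed local buffer; does not depend on the WOLFSSL_MAX_ERROR_SZ
+       value the linked wolfSSL happened to be built with */
+    char buf[256];
+    wolfSSL_ERR_error_string_n(code, buf, sizeof(buf));
+    fprintf(stderr, "wolfSSL: %s\n", buf);
+  }
+  ```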
+
+Viktor Szakats (9 Jul 2024)
+
+- CI: bump FreeBSD Python packages
+
+ Closes #14141
+
+- GHA/curl-for-win: don't run if only another CI was changed
+
+ Closes #14142
+
+Daniel Stenberg (9 Jul 2024)
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (9 Jul 2024)
+
+- vtls: replace addsessionid with set_sessionid
+
+ - deduplicate the code in many tls backends that check
+ for an existing id and delete it before adding the new one
+ - rename ssl_primary_config's `sessionid` bool to `cache_session`
+
+ Closes #14121
+
+Daniel Stenberg (9 Jul 2024)
+
+- test1175: scan libcurl-errors.md, not the generated .3 version
+
+ Closes #14133
+
+- test1139: scan .md files instead of .3 ones
+
+ As they are the canonical sources.
+
+ It still uses the curl.1 for command line option info.
+
+ Closes #14132
+
+Stefan Eissing (9 Jul 2024)
+
+- cf-socket: remove obsolete recvbuf
+
+ - recvbuf was never enabled, remove all its code
+  - remove `fdsave`ing the socket as that is no longer needed
+
+ Closes #14138
+
+Viktor Szakats (9 Jul 2024)
+
+- test1119: adapt for `.md` input
+
+ Replace logic dealing with `.3` files to handle the Markdown syntax.
+
+ Follow-up to eefcc1bda4bccd800f5a56a0fe17a2f44a96e88b #12730
+ Cherry-picked from #14097
+ Closes #14125
+
+- tests: include current directory when running test Perl commands
+
+ Necessary to find generated files in the out-of-tree build directory.
+ E.g. `tests/configurehelp.pm`, for tests 1119 and 1167.
+
+ Before this patch macOS autotools builds were failing these two tests
+ due to falling back to the default preprocessor (`cpp`) instead of
+ the actual one configured. Then `cpp` failing to compile Apple SDK
+ headers referenced by curl headers.
+
+ Cherry-picked from #14097
+ Closes #14124
+
+- configure: sort feature list, lowercase protocols, use backticks
+
+ - sort features case-insensitively to match `curl -V` and cmake.
+ `sort -f` is POSIX, but check if it's available anyway.
+
+ - make protocols lowercase to match `curl -V` and cmake.
+
+ - replace two outlier `$()` with backticks.
+
+ Closes #14117
+
+Yedaya Katsman (8 Jul 2024)
+
+- variable.md: make example use expand
+
+ I used double quotes since it seemed required for powershell, so this
+ example works in both (ba)sh and powershell as well as cmd.exe.
+
+ Closes #14118
+
+Andy Reitz (8 Jul 2024)
+
+- GIT-INFO.md: remove version requirements
+
+ Keep them in docs/INTERNALS.md
+
+ Bump lowest perl to 5.8
+
+ Closes #14112
+
+Viktor Szakats (8 Jul 2024)
+
+- sectransp: fix `HAVE_BUILTIN_AVAILABLE` checks to not emit warnings
+
+ `HAVE_BUILTIN_AVAILABLE` is a curl macro set via autotools and cmake.
+ Like other `HAVE_`s it signals availability if defined.
+
+ SecureTransport code was specifically looking for the value 1, which
+ triggered compiler warnings when the feature was not present.
+
+ Replace the existing workaround of locally suppressing the compiler
+ warning with using `defined()`.
+
+ autotools:
+ ```
+ 767 | #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILAB
+ LE == 1
+ | ^~~~~~~~~~~~~~~~~~
+ ~~~~
+ ../../lib/vtls/sectransp.c: In function 'sectransp_connect_step1':
+ ../../lib/vtls/sectransp.c:1140:52: error: "HAVE_BUILTIN_AVAILABLE" is not de
+ fined, evaluates to 0 [-Werror=undef]
+ 1140 | #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAIL
+ ABLE == 1
+ | ^~~~~~~~~~~~~~~~~~
+ ~~~~
+ ../../lib/vtls/sectransp.c:1240:52: error: "HAVE_BUILTIN_AVAILABLE" is not de
+ fined, evaluates to 0 [-Werror=undef]
+ 1240 | #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAIL
+ ABLE == 1
+ | ^~~~~~~~~~~~~~~~~~
+ ~~~~
+ ../../lib/vtls/sectransp.c: In function 'sectransp_connect_step2':
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9815428701/job/27104448045#ste
+ p:6:499
+
+ cmake gcc:
+ ```
+ 1140 | #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAIL
+ ABLE == 1
+ | ^~~~~~~~~~~~~~~~~~
+ ~~~~
+ /Users/runner/work/curl/curl/lib/vtls/sectransp.c:1240:52: error: "HAVE_BUILT
+ IN_AVAILABLE" is not defined, evaluates to 0 [-Werror=undef]
+ 1240 | #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAIL
+ ABLE == 1
+ | ^~~~~~~~~~~~~~~~~~
+ ~~~~
+ /Users/runner/work/curl/curl/lib/vtls/sectransp.c: In function 'sectransp_con
+ nect_step2':
+ /Users/runner/work/curl/curl/lib/vtls/sectransp.c:2231:51: error: "HAVE_BUILT
+ IN_AVAILABLE" is not defined, evaluates to 0 [-Werror=undef]
+ 2231 | #if(CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILA
+ BLE == 1
+ | ^~~~~~~~~~~~~~~~~~~
+ ~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9815428701/job/27104445425#ste
+ p:8:355
+
+ Cherry-picked from #14097
+ Closes #14122
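+
+  In short, mirroring the quoted lines (a sketch; the CURL_BUILD_* macros
+  are assumed to be defined by the surrounding code):
+  ```
+  /* before: the value test trips -Wundef when the macro is not set */
+  #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+      HAVE_BUILTIN_AVAILABLE == 1
+  #endif
+
+  /* after: test presence only, like any other HAVE_ macro */
+  #if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+      defined(HAVE_BUILTIN_AVAILABLE)
+  #endif
+  ```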
+
+- examples: suppress deprecation warnings locally
+
+ Simplify making clean builds by silencing deprecation warnings inside
+ the example code where these may occur.
+
+ Drop related build tweaks/comments from GHA jobs.
+
+ Example warning:
+ ```
+ curl/docs/examples/postit2-formadd.c:65:16: error: 'CURLFORM_COPYNAME' is dep
+ recated: since 7.56.0. Use curl_mime_name() [-Werror=deprecated-declarations]
+ 65 | CURLFORM_COPYNAME, "sendfile",
+ | ^~~~~~~~~~~~~~~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9841099503/job/27166970904#ste
+ p:10:829
+
+ Closes #14123
+
+- GHA/macos: bump parallel tests to -j5
+
+ Credit-to: Dan Fandrich
+ Cherry-picked from #11510 #14097
+
+- GHA/windows: usability improvements
+
+ - move `curl --version` into separate step.
+
+ - move configure log to separate step. Run on success, too.
+
+ - add step with `curl_config.h` dump (full and brief/sorted).
+
+ - make `autoreconf` a separate step.
+
+  - give each job configuration a short name.
+
+ - shorten job names.
+ Dedupe/drop redundant info, introduce abbreviations:
+ AM = autotools, CM = CMake, U = Unicode, R = Release, not -> `!`, etc.
+    Instead of mentioning `debug`, mention when it is not.
+
+ - simplify `PATH` forming for MSVC jobs.
+    It's sufficient to add the release binary directory of vcpkg; the
+    debug one is redundant.
+ Follow-up to e26cbe20cbedbea0ca743dd33880517309315cb2 #13979
+
+ - other minor tidy-ups.
+
+ Closes #14116
+
+- GHA/macos: delete misplaced `CFLAGS`, drop redundant CMake option
+
+ With macOS there is a long-term struggle with deprecation warnings.
+ In curl they occur with LDAP, SecureTransport and in docs/examples.
+
+ There are three ways to fix them:
+ - by CFLAGS `-Wno-deprecated-declarations` as a workaround.
+  - by CFLAGS `-mmacosx-version-min` set to a version where the
+    feature was not deprecated.
+ - by CMake option `-DCMAKE_OSX_DEPLOYMENT_TARGET=`.
+
+ In GHA CMake jobs, all three were used, and `-mmacosx-version-min` was
+ set in a bogus way. Delete that bogus option, and delete the lone,
+ redundant CMake option too.
+
+  In a future commit I might replace the suppression option with
+  properly setting the target OS.
+
+ Follow-up to dfdd978f7c60224dffe2aac25b436dc0a5cd0186 #13491
+ Cherry-picked from #14097
+
+- macos: add workaround for gcc, non-c-ares, IPv6, compile error
+
+ Apple macOS SDK 13.0 and later are increasingly incompatible with gcc,
+ which started causing CI errors with the 20240701.9 revision of the
+ `macos-latest` (= `macos-14-arm64`) runner image.
+
+ This error is happening inside an Apple SDK header. We use the header
+ for calling a function in a resolver-related hack, in non-c-ares, IPv6
+ builds. You can avoid the problem by using c-ares or disabling IPv6
+ (or using clang, llvm, or a compatible gcc + SDK combination).
+
+  This patch fixes affected builds by declaring the necessary framework
+ function manually, and not including the problematic header.
+
+  This workaround is ugly, doesn't cover all combinations, and is fragile.
+
+ Other options are to disable this resolver-related hack for GCC, or to
+ replace it with a solution that doesn't rely on Apple SDK.
+
+ If you are aware of a stable fix or workaround, let us know.
+
+ gcc 12.4.0 + macOS SDK 14.0 (Xcode 15.0.1) error example:
+ ```
+ In file included from /Applications/Xcode.app/Contents/Developer/Platforms/Ma
+ cOSX.platform/Developer/SDKs/MacOSX14.0.sdk/System/Library/Frameworks/CoreFou
+ ndation.framework/Headers/CoreFoundation.h:54,
+ from /Applications/Xcode.app/Contents/Developer/Platforms/Ma
+ cOSX.platform/Developer/SDKs/MacOSX14.0.sdk/System/Library/Frameworks/SystemC
+ onfiguration.framework/Headers/SCDynamicStoreCopySpecific.h:30,
+ from /Users/runner/work/curl/curl/lib/macos.c:33,
+ from /Users/runner/work/curl/curl/build/lib/CMakeFiles/libcu
+ rl_shared.dir/Unity/unity_0_c.c:244:
+ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Develope
+ r/SDKs/MacOSX14.0.sdk/System/Library/Frameworks/CoreFoundation.framework/Head
+ ers/CFUserNotification.h:126:1: error: attributes should be specified before
+ the declarator in a function definition
+ 126 | CF_INLINE CFOptionFlags CFUserNotificationCheckBoxChecked(CFIndex i)
+ API_AVAILABLE(macos(10.0)) API_UNAVAILABLE(ios, watchos, tvos) {return ((CFOp
+ tionFlags)(1UL << (8 + i)));}
+ | ^~~~~~~~~
+ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Develope
+ r/SDKs/MacOSX14.0.sdk/System/Library/Frameworks/CoreFoundation.framework/Head
+ ers/CFUserNotification.h:127:1: error: attributes should be specified before
+ the declarator in a function definition
+ 127 | CF_INLINE CFOptionFlags CFUserNotificationSecureTextField(CFIndex i)
+ API_AVAILABLE(macos(10.0)) API_UNAVAILABLE(ios, watchos, tvos) {return ((CFOp
+ tionFlags)(1UL << (16 + i)));}
+ | ^~~~~~~~~
+ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Develope
+ r/SDKs/MacOSX14.0.sdk/System/Library/Frameworks/CoreFoundation.framework/Head
+ ers/CFUserNotification.h:128:1: error: attributes should be specified before
+ the declarator in a function definition
+ 128 | CF_INLINE CFOptionFlags CFUserNotificationPopUpSelection(CFIndex n) A
+ PI_AVAILABLE(macos(10.0)) API_UNAVAILABLE(ios, watchos, tvos) {return ((CFOpt
+ ionFlags)(n << 24));}
+ | ^~~~~~~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9787982387/job/27025351601?pr=
+ 14096#step:7:18
+
+  The exact conditions are fuzzy. Oddly enough gcc 12.3.0 and the same
+  SDK as above are _compatible_:
+ https://github.com/curl/curl/actions/runs/9791701214/job/27036037162
+
+ Also notice that similar errors can also happen in SecureTransport
+ builds, due to the SDK headers required.
+
+ Ref: https://github.com/curl/curl/pull/14097#issuecomment-2208639046
+ Ref: https://github.com/curl/curl/pull/14091#issuecomment-2205870854
+ Cherry-picked from #14097
+ Closes #14119
+
+- cmake: feature casing fix and tidy-ups
+
+ - fix casing of a feature (`Unicode`) in the feature list.
+ - sort TLS backends case-insensitively.
+ - sync feature/protocol list heading with `curl -V` and autotools.
+
+ Closes #14120
+
+- GHA: ignore FTP test result in Windows jobs
+
+ They are flaky.
+
+ E.g.:
+ - old-mingw-w64 7.3.0: 2001, 2039, 2083
+ - msvc: 1501, 593 (multiple)
+
+ Ref: https://github.com/curl/curl/pull/13599#issuecomment-2119372376
+ Cherry-picked from #14116
+
+- GHA: improve vcpkg cache, add BoringSSL ECH and LibreSSL MSVC jobs
+
+ - cache on a per-package basis.
+ Replace manual caching with a built-in solution. It shares cached
+ package builds between jobs, e.g. libssh2 only builds once
+ per platform (instead of once per job). Individual packages are built
+ as needed (not the whole per-job tree). It also fixes the duplicate
+ cache entry issues.
+ Ref: https://learn.microsoft.com/en-us/vcpkg/consume/binary-caching-github-
+ actions-cache
+ Follow-up to e26cbe20cbedbea0ca743dd33880517309315cb2 #13979
+ Follow-up to cb22cfca69bded45bf7f9c72c8e6764990490f11 #14077
+
+ - add BoringSSL job with ECH enabled. The first such job in the curl CI.
+
+ - add LibreSSL job.
+
+ - use vcpkg pre-installed on the runner image, instead of rolling our
+ own. This is quicker, simpler and more robust.
+ Follow-up to e26cbe20cbedbea0ca743dd33880517309315cb2 #13979
+
+ - show pre-installed vcpkg and ports version.
+
+ - drop `gsasl` dependency till it reaches the pre-installed vcpkg ports.
+
+ - re-add `find .` to see the binaries generated.
+
+ - simplify setting up `PATH`.
+
+ - exclude failing tests for any job enabling WinIDN.
+
+ - drop collecting and uploading log archives. We already dump CMake
+    logs, and our build doesn't use Ninja. The rest of the files weren't
+    generated by the curl build. We don't aim to debug vcpkg package builds.
+
+ Closes #14090
+
+Tal Regev (7 Jul 2024)
+
+- GHA: add MSVC UWP job, expand jobs with more options
+
+ - add new dependencies: brotli, libpsl (requires libicu2) and gsasl.
+
+ - enable WinIDN in a job. Exclude failing tests.
+
+ - add UWP job and fix the build logic to support it.
+
+ - increase timeouts to build the new dependencies.
+
+ Assisted-by: Viktor Szakats
+ Closes #14077
+
+Dan Fandrich (6 Jul 2024)
+
+- tests: fix sshd UserKnownHostsFile path for MinGW/Cygwin
+
+ This is the same thing as the previous commit fd194f46 but on the next
+ line.
+
+ Follow-up to 70d2fca2
+
+ Ref: #10818
+
+- tests: fix sshd IdentityFile path for MinGW/Cygwin
+
+ This was missed during some refactoring more than a year ago and is
+ causing a warning "Use of uninitialized value $path in pattern match".
+
+ Follow-up to 70d2fca2
+
+ Ref: #10818
+ Closes #14113
+
+Viktor Szakats (7 Jul 2024)
+
+- build: add Debug, TrackMemory, ECH to feature list
+
+ Also:
+
+ - remove stray `ECH` and `HTTPSRR` from cmake protocol list.
+
+ - stop excluding `Debug` and `TrackMemory` in `test1013.pl`.
+
+ - configure: delete `CURL_CHECK_CURLDEBUG` check.
+ Ref: 065047dc62cba3efde597fa5420d112fc2f4c500
+ This check was effectively doing nothing, except disabling
+ `--enable-curldebug` in `curl-config` for
+ Cygwin/MSYS/cegcc/OS2/AIX targets with c-ares enabled.
+
+ Closes #14096
+
+Dan Fandrich (5 Jul 2024)
+
+- CI: bump the libc6 on the linux-old build
+
+ This contains some security fixes for nscd.
+
+Viktor Szakats (6 Jul 2024)
+
+- reuse: fix typo in comment
+
+ Follow-up to 9104bad82004d908e1fe66a425f8ca78f975045d #14107
+
+Dan Fandrich (5 Jul 2024)
+
+- CI: Fix typo in comment
+
+- curl: follow-up to fix categories in --help
+
+  The commit 6483813b was missing changes necessitated by 2abfc75, which
+  caused a crash. Also, use ARRAYSIZE() for cleaner code.
+
+ Follow-up to 6483813b
+
+ Ref #14055
+
+- curl: list categories in --help
+
+ This eliminates the need to run an extra help subcommand to get the
+ possible categories, reducing the friction in getting relevant help. The
+ help wording was also slightly tweaked for grammatical accuracy.
+
+ Closes #14055
+
+Daniel Stenberg (5 Jul 2024)
+
+- RELEASE-NOTES: synced
+
+renovate[bot] (5 Jul 2024)
+
+- GHA: update actions/upload-artifact and actions/download-artifact
+
+ update actions/upload-artifact digest to 0b2256b
+ update actions/download-artifact digest to fa0a91b
+
+ Closes #14111
+ Closes #14110
+
+Max Mehl (5 Jul 2024)
+
+- reuse: switch to REUSE 3.2 and REUSE.toml
+
+ - remove scripts/copyright.pl
+
+ Closes #14107
+
+Yedaya Katsman (5 Jul 2024)
+
+- curl: move more options to deprecated category
+
+ --no-npn, --sslv2, --sslv3
+
+ Closes #14109
+
+Stefan Eissing (5 Jul 2024)
+
+- multi: pollset assertion only when IP connected
+
+ Give warning for an empty pollset only when the connection has at least
+ IP connectivity. There are cases where the connect in QUIC makes another
+ attempt on a timeout and no socket will be available during that.
+
+ Closes #14108
+
+Daniel Stenberg (5 Jul 2024)
+
+- cmdline-opts: category cleanup
+
+ Option cleanups:
+
+ --get is not upload
+ --form* are post
+ - added several options into ldap, smtp, imap and pop3
+ - shortened the category descriptions in the list
+
+ category curl fixes:
+
+ --create-dirs removed from 'curl'
+ --ftp-create-dirs removed from 'curl'
+ --netrc moved to 'auth' from 'curl'
+ --netrc-file moved to 'auth' from 'curl'
+ --netrc-optional moved to 'auth' from 'curl'
+ --no-buffer moved to 'output' from 'curl'
+ --no-clobber removed from 'curl'
+ --output removed from 'curl'
+ --output-dir removed from 'curl'
+ --remove-on-error removed from 'curl'
+
+ Add a "global" category:
+
+ - Made all "global" options set this category
+
+ Add a "deprecated" category:
+
+ - Moved the deprecated options to it (maybe they should not be in any
+ category long term)
+
+ Add a 'timeout' category
+
+ - Put a number of appropriate options in it
+
+ Add an 'ldap' category
+
+ - Put the LDAP related option in there
+
+ Remove categories "ECH" and "ipfs"
+
+ - They should not be categories. Had only one single option each.
+
+ Remove category "misc"
+
+ - It should not be a category as it is impossible to know when to browse
+ it.
+
+ --use-ascii moved to ftp and output
+ --xattr moved to output
+ --service-name moved to auth
+
+ Managen fixes:
+
+  - errors if an option is given a category name that is not already set
+    up in the code
+
+  - verifies that options that set `scope: global` are also put in the
+    category `global`
+
+ Closes #14101
+
+Stefan Eissing (5 Jul 2024)
+
+- GHA: configure OpenSSL's libdir as 'lib' only
+
+ Also mention in HTTP3.md
+
+  OpenSSL has a bug that mangles the configured `--libdir=path` into the
+  wrong path in its pkgconfig files. If we just pass `--libdir=lib` it
+ should avoid this.
+
+ Ref: #14099
+ See also: https://github.com/openssl/openssl/issues/23569
+
+ Closes #14102
+
+Daniel Stenberg (5 Jul 2024)
+
+- tool_operate: simplify return code handling from url_proto()
+
+ The additional checks were superfluous as it would only ever return
+ error if one of those protocols were set. Also: a returned error
+ *should* mean get out of there, without having to check more conditions.
+
+ Closes #14104
+
+- tool_operate: check for --disable case *sensitively*
+
+ curl command line options are specified with the correct casing or they
+ don't match
+
+ Closes #14103
+
+Stefan Eissing (4 Jul 2024)
+
+- transfer: avoid polling socket every transfer loop
+
+ Improve download performance, minimal effort.
+
+ Do not poll the socket for pending data every transfer loop iteration.
+ This gives 10-20% performance gains on large HTTP/1.1 downloads (on my
+ machine).
+
+ Closes #14098
+
+Viktor Szakats (4 Jul 2024)
+
+- tests: delete `CharConv` remains
+
+ Closes #14100
+
+- GHA: bump macOS CMake job parallelism to 4 (nproc+1) [ci skip]
+
+ To match autotools ones and the rest of workflows.
+
+ Follow-up to 464282ddfb214917be3d143c035f178f3b77f209 #13807
+
+Yedaya Katsman (4 Jul 2024)
+
+- help: add flags to output and ssh categories
+
+  - Add --output, --remove-on-error, --output-dir and --create-dirs to
+    the output help category
+
+ - Add --hostpubmd5, --hostpubsha256, --insecure (-k), and --pubkey to
+ the ssh help category
+
+ Closes #14076
+
+Stefan Eissing (4 Jul 2024)
+
+- TODO: remove item about 'SSL_peak'
+
+ GnuTLS todo item about using an equivalent of `SSL_peak()`, which
+ nicely escaped the word checks, is no longer relevant.
+
+ We do not use `SSL_peek()` anymore since connection filters were
+ introduced.
+
+ Closes #14091
+
+renovate[bot] (4 Jul 2024)
+
+- GHA: update dependency gnutls/gnutls to v3.8.6
+
+ Closes #14094
+
+- GHA: update fsfe/reuse-action action to v4
+
+ Closes #14095
+
+Viktor Szakats (3 Jul 2024)
+
+- GHA: Windows job exclusions tweaks
+
+ - disable SMTP tests in MSYS2/mingw-w64 and MSVC jobs.
+ On the suspicion of sometimes hanging:
+ https://github.com/curl/curl/actions/runs/9346162475/job/25720437944?pr=138
+ 55#step:14:2838
+ https://github.com/curl/curl/actions/runs/9758011305/job/26931678639?pr=140
+ 84#step:14:2834
+ https://github.com/curl/curl/actions/runs/9774468536/job/26982805294#step:1
+ 1:4731
+
+ - run TFTP, MQTT, WebSockets tests in MSYS2/msys jobs again.
+
+ - switch hanging old-mingw-w64 7.3.0 job to Release (from Debug).
+ Guessing here, 9.5.0 is more solid, and one difference is
+ Debug/Release mode. Let's match 7.3.0 with that and see how it changes
+ hangs and flakiness.
+ The other difference is Unicode ON in 7.3.0. Flaky 6.3.0 was also
+ Debug, with Unicode OFF:
+ 217878bade884202ee5fb2e80186c5fd130392e8 #13566.
+ (Unicode unlikely to play a role here IMO.)
+ If 7.3.0 keeps hanging / remains flaky I'll consider disabling its
+ test runs.
+
+ - opt-out from vcpkg telemetry.
+
+ Ref: https://github.com/curl/curl/pull/13599#issuecomment-2119372376
+ Closes #14085
+
+renovate[bot] (3 Jul 2024)
+
+- Dockerfile: update debian:bookworm-slim to 39868a6
+
+ Closes #14083
+
+Daniel Stenberg (3 Jul 2024)
+
+- FEATURES.md: refresh
+
+ - added lots of missing stuff
+ - rearranged a little
+ - remove all footnotes
+
+ Closes #14086
+
+- RELEASE-NOTES: synced
+
+- curl_easy_perform.md: call it network transfer, not file transfer
+
+Viktor Szakats (2 Jul 2024)
+
+- winbuild: MS-DOS batch tidy-ups
+
+ - prefer `.bat` extension over `.cmd` for MS-DOS batch, which also
+ avoids confusion with OS/400 `.cmd` files.
+ - cleanup `echo` quotes, drop them consistently.
+ - delete empty output line from one of the error branches.
+ - prefer lowercase commands like the rest of MS-DOS batches.
+ - delete a contraction.
+ - drop backticks from error message.
+ - use `nmake.exe` consistently.
+ - use equal/not-equal operator style consistently.
+ - inline a single-line `if` branch.
+ - delete exceptions and rules dealing with Windows `.cmd` extension.
+
+ Closes #14084
+
+Stefan Eissing (2 Jul 2024)
+
+- multi: fix pollset during RESOLVING phase
+
+  - add a DEBUGASSERT for when a transfer's pollset should not be empty.
+  - move write unpausing from the transfer loop into curl_easy_pause. This
+    makes sure that url_updatesocket() finds the correct state when
+    updating socket events.
+  - fix HTTP/2 proxy during connect phase to set sockets correctly
+  - fix test2600 to simulate a socket set
+  - waiting for the resolver to deliver might not involve any sockets to
+    wait for. Do not generate a warning.
+
+ Fixes #14047
+ Closes #14074
+
+Daniel Stenberg (2 Jul 2024)
+
+- cmdline-opts: shorten six help texts
+
+ o --location-trusted
+ o --next
+  o --parallel-immediate
+ o --pinnedpubkey
+ o --proxy-pass
+ o --proxy-ssl-allow-beast
+
+ Closes #14075
+
+- managen: fix removing backticks from subtitles
+
+ It erroneously removed them from the wrong variable.
+
+ Closes #14081
+
+Viktor Szakats (2 Jul 2024)
+
+- cmake: show protocols, then features
+
+ To match the order used by `curl -V` and `./configure`.
+
+ Closes #14082
+
+- cmdline-docs: fix `--proxy-ca-native` example + tidy-ups
+
+ Also:
+ - fix an indentation.
+ - fix capitalized option in comment.
+
+ Closes #14078
+
+- cmake: sync protocol/feature list with `curl -V` output
+
+ - sort features case-insensitively.
+ Requires CMake v3.13.0.
+ Follow-up to 0f26abeef1dd1d1a02f8e12dbc3d51e73e9d2e9c #14063
+
+ - convert protocol list to lowercase.
+ But leave it uppercase in `curl-config`.
+
+ Closes #14066
+
+- GHA/badwords.yml: fixup indent for yamllint [ci skip]
+
+renovate[bot] (1 Jul 2024)
+
+- GHA: update dependency awslabs/aws-lc to v1.31.0
+
+ Closes #14080
+
+Daniel Stenberg (1 Jul 2024)
+
+- GHA/badwords.yml: check source code wording
+
+ Closes #14073
+
+- code: language cleanup in comments
+
+ Based on the standards and guidelines we use for our documentation.
+
+ - expand contractions (they're => they are etc)
+  - host name => hostname
+  - file name => filename
+  - user name => username
+ - man page => manpage
+ - run-time => runtime
+ - set-up => setup
+ - back-end => backend
+ - a HTTP => an HTTP
+ - Two spaces after a period => one space after period
+
+ Closes #14073
+
+Yedaya Katsman (1 Jul 2024)
+
+- docs: add RELEASE-TOOLS.md.dist to .gitignore
+
+ Closes #14079
+
+Viktor Szakats (1 Jul 2024)
+
+- libcurl.pc: add more `Requires.private`/`Requires` dependencies
+
+ - add `libmsh3` reference from cmake and autotools.
+
+ - add `mit-krb5-gssapi` reference from cmake.
+
+ It leaves GSS not set from autotools. The handling of heimdal in cmake
+ is fuzzy, that's probably missing too.
+
+ Follow-up to f057de5a1a950a90d1920021db152a4b695f1a8a #13911
+ Closes #14072
+
+- cmake: improve wolfSSL detection
+
+ - support detecting wolfSSL via pkg-config (like autotools.)
+
+ - detect wolfSSL version.
+
+ - detect `HAVE_WOLFSSL_DES_ECB_ENCRYPT`.
+ (needs e.g. `--enable-curl` when building wolfSSL)
+
+ - detect `HAVE_WOLFSSL_FULL_BIO` and enable HTTPS-proxy feature.
+ (needs e.g. `--enable-opensslall` when building wolfSSL)
+
+ - fix to show `HTTPS-proxy` in cmake feature list.
+ Ref: 55807e6c056f27846d70cec70ee6ac3f0e5b3bbe #9962
+
+ - fix to show `NTLM` in cmake feature list.
+
+ - fix to show `smb` and `smbs` in cmake protocol list.
+
+ - add wolfSSL CMake job to GHA (for macOS).
+
+ - fix mqtt and wolfSSL symbol clash.
+ ```
+ ./curl/lib/mqtt.c: In function 'mqtt_doing':
+ ./curl/lib/mqtt.c:746:17: error: declaration of 'byte' shadows a global dec
+ laration [-Werror=shadow]
+ 746 | unsigned char byte;
+ | ^~~~
+ /opt/homebrew/Cellar/wolfssl/5.7.0_1/include/wolfssl/wolfcrypt/types.h:85:3
+ 6: note: shadowed declaration is here
+ 85 | typedef unsigned char byte;
+ | ^~~~
+ ```
+
+ - format `FindWolfSSL.cmake` closer to neighbours.
+
+ Closes #14064
+
+Daniel Stenberg (1 Jul 2024)
+
+- curl_url_set: elaborate on scheme guessing
+
+ Explain a little more and refer to the CURLU_NO_GUESS_SCHEME flag
+ for getting scheme or URL.
+
+ Closes #14071
+
+- docs: misc language polish
+
+ - CURLINFO_FILETIME*: improve language
+ - add '32bit' and '64bit' as bad words, use 32-bit and 64-bit
+ - mksymbolsmanpage.pl: avoid "will"
+
+ Closes #14070
+
+- curl_easy_escape: elaborate a little on encoding a URL
+
+ Closes #14069
+
+Viktor Szakats (1 Jul 2024)
+
+- cmake: fix feature and protocol lists for SecureTransport
+
+ NTLM was missing from the features list, and SMB/SMBS from
+ the protocols list in SecureTransport builds.
+
+ Follow-up to 76a9c3c4be10b3d4d379d5b23ca76806bbae536a #3619
+
+ Reported-by: Tal Regev
+ Bug: https://github.com/curl/curl/pull/13963#issuecomment-2178791390
+ Closes #14065
+
+Daniel Stenberg (1 Jul 2024)
+
+- curl_str[n]equal.md: tidy up text to make them stand-alone
+
+  Previously this was one single manpage for two functions, but as they
+  have been two separate ones for a while, they should each clearly
+  document their own specific function.
+
+ Follow-up to eefcc1bda4bc
+
+ Closes #14068
+
+- RELEASE-NOTES: synced
+
+Tal Regev (30 Jun 2024)
+
+- GHA: use vcpkg to install packages for MSVC jobs
+
+ - enable new dependencies for existing jobs.
+
+ - add cache for vcpkg packages.
+
+ - tidy-up CMake options and environment for vcpkg.
+
+ Closes #13979
+
+Daniel Stenberg (30 Jun 2024)
+
+- curl_mprintf.md: add missing comma
+
+- CURLOPT_TLSAUTH_PASSWORD/USERNAME.md: language fixups
+
+ - relies *on* TLS SRP
+ - *for* the specific TLS backends
+
+ Closes #14061
+
+- docs/libcurl: polish the single-line descriptions
+
+ - use imperative form
+ - use lowercase
+ - no period
+ - unify some phrases
+ - fix curl_multi_socket and curl_multi_socket_all to keep their own
+ descriptions
+
+ Closes #14062
+
+Viktor Szakats (30 Jun 2024)
+
+- cmake: alpha-sort feature list
+
+ Like autotools does.
+
+ Closes #14063
+
+renovate[bot] (29 Jun 2024)
+
+- GHA: update github/codeql-action digest to b611370
+
+ Closes #14058
+
+Tatsuhiro Tsujikawa (29 Jun 2024)
+
+- vquic: fix UDP_GRO struct cmsghdr data type
+
+ The data type for UDP_GRO in struct cmsghdr is int. Limit the usage of
+ UDP_GRO to linux only because it is not portable.
+
+ Closes #14056
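+
+  A Linux-only sketch of reading that control message with the right
+  type (illustrative, not the curl vquic code):
+  ```
+  #include <string.h>
+  #include <sys/socket.h>
+  #include <netinet/udp.h>   /* UDP_GRO and SOL_UDP on recent Linux */
+
+  static int gro_segment_size(struct msghdr *msg)
+  {
+    struct cmsghdr *cmsg;
+    for(cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+  #if defined(__linux__) && defined(UDP_GRO) && defined(SOL_UDP)
+      if(cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) {
+        int gso_size;           /* the kernel stores an int here */
+        memcpy(&gso_size, CMSG_DATA(cmsg), sizeof(gso_size));
+        return gso_size;
+      }
+  #endif
+    }
+    return 0;                  /* no GRO information present */
+  }
+  ```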
+
+Sertonix (29 Jun 2024)
+
+- mk-ca-bundle.pl: delay 'curl -V' execution until it is needed
+
+  Avoid a `Can't exec "curl"` message when curl is not actually needed.
+
+ Closes #14060
+
+Daniel Stenberg (29 Jun 2024)
+
+- src/Makefile.am: remove SUBDIRS assignment
+
+ It was once used to continue into ../docs but is just leftovers now.
+
+ Closes #14054
+
+z2_ (28 Jun 2024)
+
+- x509asn1: remove superfluous free()
+
+Stefan Eissing (28 Jun 2024)
+
+- ngtcp2+quictls: fix cert-status use
+
+ - add test for --cert-status on all http versions
+
+ Reported-by: Dexter Gerig
+ Fixes #14049
+ Closes #14050
+
+Daniel Stenberg (28 Jun 2024)
+
+- RELEASE-PROCEDURE.md: update release date
+
+- managen: insert final .fi for files ending with a quote
+
+ When an individual file ended with a quote (typically an example), the
+ render function would return without ending the quote correctly with a
+ ".fi" (fill in) in the manpage output.
+
+  This made the additional text provided below render wrongly.
+
+ Closes #14048
+
+Junho Choi (28 Jun 2024)
+
+- quic: update to quiche 0.22.0
+
+ quiche 0.22.0 will set SONAME in libquiche.so (libquiche.so.0) for
+ linux/BSDs. Install a symlink with SONAME.
+
+ Closes #14030
+ Closes #14046
+
+Daniel Stenberg (28 Jun 2024)
+
+- managen: introduce "Multi: per-URL"
+
+ For -O, -o and -T that are used once per specified URL.
+
+ Closes #14045
+
+- quiche: fix operand of ‘?:’ changes signedness
+
+ ... from ‘int’ to ‘curl_uint64_t’
+
+ Closes #14041
+
+- GHA: add --enable-werror to the quiche job
+
+ Closes #14041
+
+- KNOWN_BUGS: three new bugs
+
+ These have lingered in the issue tracker for a long time without action.
+ We don't expect any fixes in the near term either. Move them to the
+ KNOWN_BUGS document.
+
+ Closes #12177
+ Closes #12171
+ Closes #13350
+
+ Closes #14042
+
+Viktor Szakats (27 Jun 2024)
+
+- CI: add whitespace checker
+
+ Fix issues detected.
+
+ Also:
+
+ - One of the `.vc` files used LF EOLs, while the other didn't.
+ Make that one also use LF EOLs, as this is apparently supported by
+ `nmake`.
+
+ - Drop `.dsw` and `.btn` types from `.gitattributes`.
+ The repository doesn't use them.
+
+ - Sync section order with the rest of files in
+ `tests/certs/EdelCurlRoot-ca.prm`.
+
+ - Indent/align `.prm` and `.pem` files.
+
+ - Delete dummy `[something]` section from `.prm` and `.pem` files.
+
+ Mental note:
+ MSVC `.sln` files seem to accept spaces for indentation and also support
+ LF line-endings. I cannot test this and I don't know what's more
+ convenient when updating them, so left them as-is, with specific
+ exclusions.
+
+ Closes #14031
+
+- CI: fix typo in job name
+
+ Closes #14040
+
+Stefan Eissing (27 Jun 2024)
+
+- tests/httpd: adjust ReadBufferSize for better performance
+
+ - list httpd and caddy versions in scorecard run
+
+ Closes #14039
+
+Daniel Stenberg (27 Jun 2024)
+
+- runtests: fix %VERNUM
+
+ It needs to be set to the leading digits and dots only, so that the
+  `-[date]` suffix strings are not included, as those are used in the
+  daily snapshots.
+
+ Fixes #14035
+ Reported-by: Marcel Raad
+ Closes #14036
+
+Philip Heiduck (27 Jun 2024)
+
+- CI/synopsis.yml: run on `.md` files
+
+ Reported-by: Viktor Szakats
+ Fixes #14032
+ Closes #14037
+
+Daniel Stenberg (27 Jun 2024)
+
+- verify-synopsis.pl: work with .md files
+
+ Ref: #14037
+ Closes #14038
+
+- conncache: done always evaluates to false
+
+ Follow-up to c9b95c0bb30f88bf00e1ac
+
+ Spotted by CodeSonar
+
+ Reviewed-by: Stefan Eissing
+ Closes #14034
+
+- lib: add a few DEBUGASSERT(data) to aid code analyzers
+
+ ... where 'data' is assumed to always work.
+
+ Closes #14033
+
+- RELEASE-NOTES: synced
+
+Viktor Szakats (26 Jun 2024)
+
+- tidy-up: use `/usr/bin/env perl` shebang
+
+ Most Perl scripts already used it. Sync up the few outliers.
+
+ Closes #14029
+
+Stefan Eissing (26 Jun 2024)
+
+- quic: openssl quic, cmake and doc version update to 3.3.0
+
+ Closes #14028
+
+- http/3: add shutdown support
+
+ - openssl-quic shutdown handling
+ - ngtcp2 shutdown handling
+ - quiche shutdown handling
+  - add test_19_06 for verification
+
+ Reported-by: Dexter Gerig
+ Closes #14027
+ Fixes #14022
+
+Daniel Stenberg (26 Jun 2024)
+
+- tests: verify managen
+
+ 1705: verifies the manpage output
+
+ 1706: verifies the ascii output
+
+ Closes #14025
+
+- runtests: support %DATE for YYYY-MM-DD of right now
+
+- runtests: support %VERNUM
+
+ For the plain version number of the built curl without -DEV etc. Only
+ digits and dots.
+
+- managen: only output .RE for manpage output
+
+ For ascii they are just rubbish.
+
+ Closes #14025
+
+Tatsuhiro Tsujikawa (26 Jun 2024)
+
+- quic: enable UDP GRO
+
+ Closes #14012
+
+Stefan Eissing (26 Jun 2024)
+
+- quic: require at least OpenSSL 3.3 for QUIC
+
+ - when checking for QUIC support in OpenSSL, also check
+ for it being at least 3.3.0
+ - remove workarounds for features buggy or missing in 3.2
+
+ Closes #14026
+
+Daniel Stenberg (26 Jun 2024)
+
+- FILEFORMAT.md: mention <file[num]> for "client"
+
+ They can be used to create more files.
+
+ Closes #14024
+
+Marcel Raad (26 Jun 2024)
+
+- system_win32: add missing curl.h include
+
+ It's required for `CURLcode`.
+
+ Closes https://github.com/curl/curl/pull/14019
+
+Daniel Stenberg (26 Jun 2024)
+
+- TODO: specify which response codes that make -f/--fail return error
+
+ Suggestion from the user survey 2024
+
+ Closes #14020
+
+Stefan Eissing (26 Jun 2024)
+
+- lib: graceful connection shutdown
+
+ When libcurl discards a connection there are two phases this may go
+ through: "shutdown" and "closing". If a connection is aborted, the
+ shutdown phase is skipped and it is closed right away.
+
+ The connection filters attached to the connection implement the phases
+  in their `do_shutdown()` and `do_close()` callbacks. Filters now carry
+  a `shutdown` flag next to `connected` to keep track of the shutdown
+ operation.
+
+ Filters are shut down from top to bottom. If a filter is not connected,
+ its shutdown is skipped. Notable filters that *do* something during
+ shutdown are HTTP/2 and TLS. HTTP/2 sends the GOAWAY frame. TLS sends
+ its close notify and expects to receive a close notify from the server.
+
+ As sends and receives may EAGAIN on the network, a shutdown is often not
+ successful right away and needs to poll the connection's socket(s). To
+ facilitate this, such connections are placed on a new shutdown list
+ inside the connection cache.
+
+ Since managing this list requires the cooperation of a multi handle,
+ only the connection cache belonging to a multi handle is used. If a
+ connection was in another cache when being discarded, it is removed
+ there and added to the multi's cache. If no multi handle is available at
+  that time, the connection is shut down and closed in a one-time,
+ best-effort attempt.
+
+  When a multi handle is destroyed, all connections still on the shutdown
+ list are discarded with a final shutdown attempt and close. In curl
+ debug builds, the environment variable `CURL_GRACEFUL_SHUTDOWN` can be
+ set to make this graceful with a timeout in milliseconds given by the
+ variable.
+
+ The shutdown list is limited to the max number of connections configured
+ for a multi cache. Set via CURLMOPT_MAX_TOTAL_CONNECTIONS. When the
+ limit is reached, the oldest connection on the shutdown list is
+ discarded.
+
+ - In multi_wait() and multi_waitfds(), collect all connection caches
+ involved (each transfer might carry its own) into a temporary list.
+ Let each connection cache on the list contribute sockets and
+    POLLIN/OUT events its connections are waiting for.
+
+ - in multi_perform() collect the connection caches the same way and let
+    them perform their maintenance. This will make another non-blocking
+    attempt to shut down all connections on its shutdown list.
+
+ - for event based multis (multi->socket_cb set), add the sockets and
+ their poll events via the callback. When `multi_socket()` is invoked
+ for a socket not known by an active transfer, forward this to the
+ multi's cache for processing. On closing a connection, remove its
+ socket(s) via the callback.
+
+  TLS connection filters MUST NOT send close notify messages in their
+ `do_close()` implementation. The reason is that a TLS close notify
+ signals a success. When a connection is aborted and skips its shutdown
+ phase, the server needs to see a missing close notify to detect
+ something has gone wrong.
+
+ A graceful shutdown of FTP's data connection is performed implicitly
+ before regarding the upload/download as complete and continuing on the
+ control connection. For FTP without TLS, there is just the socket close
+ happening. But with TLS, the sent/received close notify signals that the
+ transfer is complete and healthy. Servers like `vsftpd` verify that and
+ reject uploads without a TLS close notify.
+
+ - added test_19_* for shutdown related tests
+ - test_19_01 and test_19_02 test for TCP RST packets
+ which happen without a graceful shutdown and should
+ no longer appear otherwise.
+ - add test_19_03 for handling shutdowns by the server
+ - add test_19_04 for handling shutdowns by curl
+    - add test_19_05 for event based shutdown by server
+ - add test_30_06/07 and test_31_06/07 for shutdown checks
+ on FTP up- and downloads.
+
+ Closes #13976
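+
+  The filter shutdown mechanics, reduced to an illustrative sketch; the
+  types and names below are made up and not libcurl internals.
+  ```
+  #include <stdbool.h>
+
+  struct filter {
+    struct filter *next;
+    bool connected;
+    bool shut_down;
+    /* returns 0 on success and sets *done; !done means "try again
+       after polling", mirroring an EAGAIN on the network */
+    int (*do_shutdown)(struct filter *f, bool *done);
+  };
+
+  static int conn_shutdown_step(struct filter *top, bool *all_done)
+  {
+    struct filter *f;
+    *all_done = true;
+    for(f = top; f; f = f->next) {      /* shut down top to bottom */
+      bool done = true;
+      if(f->connected && !f->shut_down) {
+        int rc = f->do_shutdown(f, &done);
+        if(rc)
+          return rc;                    /* hard error: close right away */
+        f->shut_down = done;
+      }
+      if(!done)
+        *all_done = false;              /* keep it on the shutdown list */
+    }
+    return 0;
+  }
+  ```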
+
+Daniel Stenberg (25 Jun 2024)
+
+- managen: fix blank line detection
+
+ Follow-up to d14a53eea7b87 which ruined the output somewhat.
+
+ Closes #14017
+
+- managen: output tabs for each 8 leading spaces
+
+ This replacing of eight leading spaces into tabs was already done for
+ the embedded uncompressed version in tool_hugehelp.c so it does not save
+ anything there. But the gzip compressed version ends up almost 2K
+ smaller.
+
+ The output in a terminal should be identical.
+
+ Before using TABs:
+
+ curl.txt 282492 bytes
+ curl.txt.gz 73261 bytes
+
+ With this change applied:
+
+ curl.txt 249382 bytes
+ curl.txt.gz 71470 bytes
+
+ Closes #14016
+
+- managen: error on trailing blank lines in input files
+
+ Ref: #14014
+ Closes #14015
+
+Viktor Szakats (25 Jun 2024)
+
+- tidy-up: more whitespace
+
+ Closes #14014
+
+Stefan Eissing (25 Jun 2024)
+
+- multi: multi_getsock(), check correct socket
+
+ - in phase CONNECTING/TUNNELING/PROTOCONNECT, retrieve
+ the socket from the connection filters and do not rely
+ on `conn->sockfd` being already set by the transfer.
+ - this applies to the default behaviour, a protocol handler
+ may override this via its callbacks.
+ - add a warning message in multi_getsock() when the transfer
+ is expected to have something in its pollset, but instead
+ it is empty.
+
+ Reported-by: saurabhsingh-dev on github
+ Fixes #13998
+ Closes #14011
+
+Daniel Stenberg (25 Jun 2024)
+
+- managen: fix each options footer to end with newline
+
+ A previous change sometimes made a command line option's description not
+  end with a newline immediately before the next command line option.
+
+ Also widened the lines to wrap on column 79 instead of 78.
+
+ Closes #14010
+
+Alex Snast (25 Jun 2024)
+
+- wolfssl: assume key_file equal to clientcert in the absence of key_file
+
+ When the user sets CURLOPT_SSLCERT but leaves CURLOPT_SSLKEY unset,
+ assume the path passed in CURLOPT_SSLCERT also holds the SSL key,
+ which is what the OpenSSL implementation does.
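+
+ A minimal application-side sketch (not from the patch) of the setup
+ this covers; the URL and file name are made up, and the file must be
+ a PEM holding both the client certificate and its private key:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     /* combined cert + key file; CURLOPT_SSLKEY deliberately unset */
+     curl_easy_setopt(curl, CURLOPT_SSLCERT, "client.pem");
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```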
+
+ Fixes #14007
+ Closes #14008
+
+Viktor Szakats (24 Jun 2024)
+
+- autotools: fix pkg-config names (zstd, ngtcp2*)
+
+ Also verified that all names now match up with CMake.
+
+ Follow-up to f057de5a1a950a90d1920021db152a4b695f1a8a #13911
+ Follow-up to eeab0ea7aa19af61af881e8a0bf9ff1f2e28ef79 #13994
+ Reported-by: æŽå››
+ Fixes #14005
+ Closes #14006
+
+- tidy-up: whitespace [ci skip]
+
+Daniel Stenberg (24 Jun 2024)
+
+- cmdline-docs: "added in" cleanups
+
+ - markup fixes
+ - remove some mentions of < 7.60.0 changes
+
+ Closes #14003
+
+- RELEASE-NOTES: synced
+
+- managen: "added in" fixes
+
+ - up the limit: remove all mentions of 7.60 or earlier from the manpage.
+ 7.60 is 6 years old now.
+ - warn on "broken" added in lines, as they avoid detection
+ - fixup added in markup in a few curldown files
+
+ Closes #14002
+
+Matt Jolly (24 Jun 2024)
+
+- configure: fix pkg-config library name 'libnghttp3'
+
+ Closes #13994
+
+Daniel Stenberg (24 Jun 2024)
+
+- managen: cleanups to generate nicer-looking output
+
+ - output "see also" last
+ - when there are multiple mutex items, use commas between all of them
+ except the last.
+ - call them mutually exclusive WITH not TO other options.
+ - remove trailing space from added in, add newline prefix
+ - smoother language for requires
+
+ Closes #14001
+
+- configure: require a QUIC library if nghttp3 is used
+
+ Instead of just silently disabling HTTP/3.
+
+ Reported-by: Matt Jolly
+ Fixes #13995
+ Closes #13999
+
+- docs/cmdline-opts: remove two superfluous "Added in" mentions
+
+ The key "added in" phrase for the option itself is added automatically.
+
+ Closes #14000
+
+- cookie-jar.md: see also --junk-session-cookies
+
+ Closes #13996
+
+- runtests: support crlf="yes" for the <stderr> section
+
+- TODO: -h option
+
+ Support "curl -h --insecure" etc to output the manpage section for the
+ --insecure command line option in the terminal. Should be possible to
+ work with either long or short versions of command line options.
+
+ Closes #13990
+
+- trace-ascii.md: mention "%" for stderr
+
+ Closes #13991
+
+- connect-to.md: expand with examples
+
+ - add a reference from the resolve section to connect-to if the user
+ wants a wildcard for the port number
+
+ Closes #13989
+
+- TODO: connect to multiple IPs in parallel
+
+ Closes #13986
+
+- dump-header.md: mention minus for stdout
+
+ Closes #13985
+
+- CURLOPT_RESOLVE.md: mention hostname can be wildcard ('*')
+
+ Closes #13983
+
+Andy Pan (22 Jun 2024)
+
+- cf-socket: optimize curlx_nonblock() and check its return error
+
+ Reviewed-by: Stefan Eissing
+ Closes #13942
+
+z2_ (22 Jun 2024)
+
+- x509asn1: prevent NULL dereference
+
+ Closes #13978
+
+Daniel Stenberg (19 Jun 2024)
+
+- unit2604: use 'unitfail' instead of 'error' variable
+
+ Since the framework is already returning that variable by default.
+ Avoids a warning for unreachable code.
+
+ Reported-by: Tal Regev
+ Fixes #13967
+ Closes #13973
+
+- KNOWN_BUGS: TFTP tests fail on OpenBSD
+
+ Closes #13623
+ Closes #13975
+
+- VULN-DISCLOSURE-POLICY: NULL dereferences and crashes
+
+ If a malicious server can trigger a NULL dereference in curl or
+ otherwise cause curl to crash (and nothing worse), chances are high that
+ we do not consider that a security problem.
+
+ Closes #13974
+
+- RELEASE-NOTES: synced
+
+Sergey Markelov (19 Jun 2024)
+
+- mbedtls: support CURLOPT_CERTINFO
+
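+ A hedged usage sketch, following the usual CURLINFO_CERTINFO access
+ pattern documented for libcurl; the URL is illustrative:
+
+ ```c
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     struct curl_certinfo *ci = NULL;
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     curl_easy_setopt(curl, CURLOPT_CERTINFO, 1L);
+     if(!curl_easy_perform(curl) &&
+        !curl_easy_getinfo(curl, CURLINFO_CERTINFO, &ci) && ci) {
+       int i;
+       for(i = 0; i < ci->num_of_certs; i++) {
+         struct curl_slist *s;
+         /* each certificate is a list of "name: value" strings */
+         for(s = ci->certinfo[i]; s; s = s->next)
+           printf("cert %d: %s\n", i, s->data);
+       }
+     }
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```
+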
+ Closes #13113
+
+Daniel Stenberg (19 Jun 2024)
+
+- x509asn1: ASN1tostr() should fail when 'constructed' is set
+
+ This is a regression from my refactor in 623c3a8fa0bdb (#12808)
+
+ Follow-up to 623c3a8fa0bdb2751f14b37417
+
+ Closes #13972
+
+- x509asn1: remove two static variables
+
+ cnOID and sanOID were not used outside of the OID table anyway
+
+ Closes #13971
+
+brian m. carlson (18 Jun 2024)
+
+- TODO: TLS channel binding
+
+ Closes #13483
+
+Tal Regev (17 Jun 2024)
+
+- cmake: add CURL_USE_GSASL option with detection + CI test
+
+ Reviewed-by: Viktor Szakats
+ Closes #13948
+
+Daniel Stenberg (16 Jun 2024)
+
+- x509asn1: make Curl_extract_certinfo store error message
+
+ To help us all better understand where the error actually comes from.
+
+ Ref: #13958
+ Closes #13959
+
+Viktor Szakats (15 Jun 2024)
+
+- appveyor: dump build logs on failure in VS2008 jobs
+
+ This seems to be the only way to see what actual toolchain commands were
+ run, and with what arguments.
+
+ Without `dos2unix`, `cat` output comes out empty.
+
+ Closes #13957
+
+- cmake: fix quotes when appending multiple options (SecureTransport)
+
+ Copied from a vcpkg distro patch:
+ https://github.com/microsoft/vcpkg/blob/02745e0f4749d1f51d2025824209408f5a6c3
+ 614/ports/curl/dependencies.patch#L43C38-L44
+
+ Ref: https://github.com/microsoft/vcpkg/pull/38847
+ Ref: https://github.com/microsoft/vcpkg/commit/795f2f137e6cf6d985fcc927bffcaf
+ 9c0a96e4ac
+ Ref: https://github.com/microsoft/vcpkg/pull/38847/commits/36f0c917de5319e953
+ 61451fc0aef0698b264874#diff-ab5c23e5dc5df412539cc93e24b37abbc588e1918236f8abc
+ 019d676b270c85fR39 (sub-commit)
+
+ Authored-by: Kai Pastor
+ Closes #13953
+
+Daniel Stenberg (15 Jun 2024)
+
+- CURLOPT_NETRC.md: clarify what it does on Windows
+
+ Closes #13956
+
+- KNOWN_BUGS: "HTTP/2 + TLS spends a lot of time in recv"
+
+ Closes #13416
+ Closes #13955
+
+- RELEASE-NOTES: synced
+
+Yedaya Katsman (14 Jun 2024)
+
+- examples: add missing binaries to .gitignore
+
+ They were showing as changed when built. Add them sorted alphabetically,
+ while also moving a few more entries to sorted order.
+
+ Closes #13952
+
+- docs: reference non deprecated libcurl options
+
+ There are places where man pages reference deprecated CURLOPT options
+ where it doesn't make sense; replace them with the recommended
+ replacement option.
+
+ Also remove the reference to the removed mesalink TLS backend.
+
+ Closes #13951
+
+Daniel Stenberg (14 Jun 2024)
+
+- gnutls: pass in SNI name, not hostname when checking cert
+
+ The function we use is called 'gnutls_x509_crt_check_hostname()' but if
+ we pass in the hostname with a trailing dot, the check fails. If we pass
+ in the SNI name, which cannot have a trailing dot, it succeeds for
+ https://pyropus.ca./
+
+ I consider this as a flaw in GnuTLS and have submitted this issue
+ upstream:
+
+ https://gitlab.com/gnutls/gnutls/-/issues/1548
+
+ In order to work with old and existing GnuTLS versions, we still need
+ this change no matter how they view the issue or might change it in the
+ future.
+
+ Fixes #13428
+ Reported-by: Ryan Carsten Schmidt
+ Closes #13949
+
+- BINDINGS: update java link to one that exists
+
+ The previous java binding seems to have vanished. Link to one that still
+ exists.
+
+ Bug: https://github.com/curl/everything-curl/issues/456
+ Reported-by: Jiang Wenjian
+ Closes #13950
+
+renovate[bot] (14 Jun 2024)
+
+- GHA: update pinned actions
+
+ - github/codeql-action digest to 23acc5c
+ - actions/checkout digest to 692973e
+ - rojopolis/spellcheck-github-actions digest to d354a4d
+
+ Closes #13935
+ Closes #13945
+ Closes #13946
+
+Jay Satiro (14 Jun 2024)
+
+- tool_cb_hdr: allow etag and content-disposition for 3xx reply
+
+ - Parse etag and content-disposition headers for 3xx replies.
+
+ For example, a server may send a content-disposition filename header
+ with a redirect reply (3xx) but not with the final response (2xx).
+ Without this change curl would ignore the server's specified filename
+ and continue to use the filename extracted from the user-specified URL.
+
+ Prior to this change, 75d79a4 had limited etag and content-disposition
+ to 2xx replies only.
+
+ Tests-by: Daniel Stenberg
+
+ Reported-by: Morgan Willcock
+ Fixes https://github.com/curl/curl/issues/13302
+ Closes #13484
+
+Daniel Stenberg (13 Jun 2024)
+
+- transfer: set CSELECT_IN if there is data pending
+
+ When aborting the transfer loop early, like when there is rate limiting
+ in effect, there might be buffered data already read off the socket so
+ the socket might not signal readability. Therefore we must set
+ CSELECT_IN manually if data_pending() suggests there might be more data
+ to get. This is particularly noticeable with SSH when the underlying
+ library has drained the socket and holds pending data in its buffer.
+
+ Reported-by: alervd on github
+ Fixes #13695
+ Closes #13943
+
+Viktor Szakats (13 Jun 2024)
+
+- cmake: enable SOVERSION for Cygwin and `CMAKE_DLL_NAME_WITH_SOVERSION`
+
+ - enable SOVERSION when `CMAKE_DLL_NAME_WITH_SOVERSION=ON` is set.
+ Ref: https://cmake.org/cmake/help/v3.27/variable/CMAKE_DLL_NAME_WITH_SOVERS
+ ION.html
+ Use: https://github.com/search?q=-DCMAKE_DLL_NAME_WITH_SOVERSION&type=code
+
+ - enable SOVERSION for Cygwin builds by default.
+
+ Ref: #13936
+ Ref: #13944
+ Closes #13898
+
+- cmake: allow SOVERSION override with `CURL_LIBCURL_SOVERSION`
+
+ Allow overriding SOVERSION with the new CMake option:
+ `CURL_LIBCURL_SOVERSION=ON/OFF`
+
+ For certain target platforms the shared libcurl library filename
+ contains the SOVERSION. This new option allows enabling or disabling
+ this behavior manually. If set, it takes precedence over the default
+ setting.
+
+ Ref: #13898
+ Closes #13944
+
+renovate[bot] (13 Jun 2024)
+
+- Dockerfile: update debian:bookworm-slim to 84d83b2
+
+ Closes #13934
+
+Daniel Stenberg (13 Jun 2024)
+
+- configure: use AC_MSG_WARN for TLS/experimental warning texts
+
+ - no longer warns for mbedtls
+ - warns for each item on individual lines
+ - no longer shows irrelevant TLS libraries when multiple are selected
+ - removes ech repetition
+
+ Closes #13941
+
+- GHA: detect and warn for more English contractions
+
+ As we try to avoid them in curl documentation
+
+ Closes #13940
+
+Stefan Eissing (13 Jun 2024)
+
+- transfer: do not use EXPIRE_NOW while blocked
+
+ - When a transfer sets `data->state.select_bits`, it is
+ scheduled for rerun with EXPIRE_NOW. If such a transfer
+ is blocked (due to PAUSE, for example), this will lead to
+ a busy loop.
+ - multi.c: check for transfer block
+ - sendf.*: add Curl_xfer_is_blocked()
+ - sendf.*: add client reader `is_paused()` callback
+ - implement the `is_paused()` callback where needed
+
+ Closes #13908
+
+renovate[bot] (13 Jun 2024)
+
+- ci: update dependency ngtcp2/ngtcp2 to v1.6.0
+
+ Closes #13939
+
+- ci: update ngtcp2/nghttp3 to v1.4.0
+
+ Closes #13938
+
+Viktor Szakats (13 Jun 2024)
+
+- cmake: stop setting SOVERSION for the static lib target
+
+ Also move the logic closer to its use and related tidy-ups.
+
+ Cherry-picked from #13898
+ Closes #13936
+
+Patrick Monnerat (13 Jun 2024)
+
+- os400: make it compilable again
+
+ A newly introduced use of getsockname() in the cli tool makes it require
+ the ascii wrapper module, which is not available outside of the library:
+ as the tool only uses the address family field (binary), disable
+ wrappers outside of libcurl.
+
+ Fix setsockopt() parameter type mismatch using a (void *) cast.
+
+ Sync ILE/RPG binding.
+
+ Closes #13930
+
+Viktor Szakats (13 Jun 2024)
+
+- libcurl.pc: add `Requires.private`, `Requires` for static linking
+
+ - cmake: populate for dependencies.
+ - autotools: populate for dependencies.
+ (including mbedtls, though the script does not detect
+ mbedtls through pkgconfig. mbedtls 3.6.0 now supports it.)
+
+ Skip dealing with gssapi in this patch.
+
+ Fixes #864
+ Closes #13911
+
+- cmake: bring `curl-config.cmake` closer to `FindCURL`
+
+ Set `CURL_LIBRARIES` and `CURL_INCLUDE_DIRS` variables
+ for compatibility with CMake's `FindCURL.cmake`:
+ https://github.com/Kitware/CMake/blob/b411d0146c2e06acfb0c823bb039e99f0191b61
+ 1/Modules/FindCURL.cmake#L209
+
+ For dependent projects, CMake's suggestion is to replace
+ `CURL_LIBRARIES` with `CURL::libcurl`, and drop `CURL_INCLUDE_DIRS`.
+
+ Reported-by: Aurélien Pierre
+ Ref: https://curl.se/mail/lib-2024-06/0014.html
+ Ref: https://gitlab.kitware.com/cmake/cmake/-/issues/24580
+ Closes #13897
+
+Daniel Stenberg (13 Jun 2024)
+
+- tool_getparam: fix the bsearch call for ip-tos names
+
+ Follow-up to 3c20ae08b9591
+ Reported-by: Samuel Chiang
+ Fixes #13932
+ Closes #13933
+
+- request: change the struct field bodywrites to a bool, only for hyper
+
+ Only hyper needs to know this, and it can use it as a boolean.
+
+ Closes #13928
+
+Andy Pan (12 Jun 2024)
+
+- test: fix CURLOPT_TCP_KEEPCNT typo
+
+ Follow up to b77d627d242
+
+ Closes #13931
+
+Daniel Stenberg (12 Jun 2024)
+
+- http: remove "struct HTTP"
+
+ It is not actually used anymore and only contained a dummy struct field.
+ Remove all traces and uses of it.
+
+ Closes #13927
+
+- cd2nroff: convert two warnings to errors
+
+ Since the warnings tend to get missed too easily and these are problems
+ we rather want addressed than letting slide.
+
+ Closes #13929
+
+- urlapi: use a correct value for CURLU_NO_GUESS_SCHEME
+
+ It was mistakenly set to the same value as CURLU_GET_EMPTY uses.
+
+ Reported-by: Patrick Monnerat
+ Bug: https://github.com/curl/curl/commit/655d44d139489625e77cf6790d36
+ Closes #13926
+
+- file: separate fake headers and body with a stand-alone CRLF
+
+ Instead of bolting on the extra CRLF to the final header - as that makes
+ the behavior inconsistent and not as documented. The final CRLF is now
+ also made unconditional, just like it is for HTTP.
+
+ Reported-by: dogma
+ Bug: https://curl.se/mail/lib-2024-06/0033.html
+ Closes #13925
+
+- RELEASE-NOTES: synced
+
+Andy Pan (12 Jun 2024)
+
+- tcpkeepalive: add CURLOPT_TCP_KEEPCNT and --keepalive-cnt
+
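+ A minimal sketch of how the new option fits next to the existing
+ keep-alive options; the URL and the values are arbitrary:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);
+     curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 120L);
+     curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 30L);
+     curl_easy_setopt(curl, CURLOPT_TCP_KEEPCNT, 5L); /* new option */
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```
+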
+ Closes #13885
+
+Daniel Stenberg (12 Jun 2024)
+
+- TODO: make it "Add missing features to TLS backends"
+
+ ... instead of just mentioning CA caching.
+
+ Closes #13924
+
+Orgad Shaneh (11 Jun 2024)
+
+- curl: support VLAN Priority: --vlan-priority
+
+ Add --vlan-priority option to the command line tool for setting VLAN
+ priority.
+
+ Closes #13907
+
+RainRat (11 Jun 2024)
+
+- misc: fix typos
+
+ Closes #13923
+
+Daniel Stenberg (11 Jun 2024)
+
+- CURLOPT_ECH.md: remove repeated 'if'
+
+ Closes #13922
+
+- vms: fixed language in comment
+
+ It started with me fixing a repeated "are are" but the wording was
+ incomprehensible so I tried to untangle it.
+
+ Closes #13921
+
+Stefan Eissing (11 Jun 2024)
+
+- lib: xfer_setup and non-blocking shutdown
+
+ - clarify Curl_xfer_setup() with RECV/SEND flags and different calls for
+ which socket they operate on. Add a shutdown flag for secondary
+ sockets
+ - change Curl_xfer_setup() calls to new functions
+ - implement non-blocking connection shutdown at the end of receiving or
+ sending a transfer
+
+ Closes #13913
+
+Daniel Stenberg (11 Jun 2024)
+
+- test1486: verify that write-out.md and tool_writeout.c are in sync
+
+ - also verify alphabetical order in the source
+ - add two missing variables to write-out.md
+
+ Closes #13920
+
+Viktor Szakats (11 Jun 2024)
+
+- GHA: add cmake MSYS2 native job
+
+ curl, libcurl, examples, build-only.
+
+ To compare build behaviour with autotools.
+
+ Closes #13917
+
+Daniel Stenberg (11 Jun 2024)
+
+- openssl: shortcut store_expired for negative timeouts
+
+ Avoid some unnecessary computation if the timeout is negative.
+
+ Spotted by CodeSonar
+ Closes #13919
+
+- RELEASE-NOTES: synced
+
+- curl: support -w '%{num_retries}
+
+ Suggested-by: Jay Guerette
+ Ref: https://github.com/curl/curl/discussions/13901
+ Closes #13910
+
+Guilherme Puida (11 Jun 2024)
+
+- pytest: include testenv/vsftpd.py in dist tarball
+
+ Closes #13918
+
+Viktor Szakats (11 Jun 2024)
+
+- DISTROS: add MSYS2 (native) links
+
+ Also rename existing 'MSYS2' to 'MSYS2 (mingw-w64)'.
+
+ Closes #13915
+
+Daniel Stenberg (10 Jun 2024)
+
+- tool_writeout: get certinfo only when needing it
+
+ Removes a fairly expensive libcurl call when not necessary
+
+ Closes #13914
+
+- tool_writeout: bsearch the variable name
+
+ As the list of variable names grows, doing a simple loop to find the
+ name gets increasingly worse. This switches to a bsearch.
+
+ Also: do a case sensitive check for the variable name. The names have
+ not been documented to be case insensitive and there is no point in
+ having them so.
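+
+ The general pattern, sketched here with a made-up variable table (not
+ the tool's actual one), kept sorted case-sensitively so strcmp() can
+ drive bsearch():
+
+ ```c
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+ struct var {
+   const char *name;
+   int id;
+ };
+
+ /* must stay sorted by strcmp() order */
+ static const struct var variables[] = {
+   {"http_code", 1},
+   {"num_retries", 2},
+   {"url_effective", 3}
+ };
+
+ static int cmpvar(const void *key, const void *entry)
+ {
+   return strcmp((const char *)key,
+                 ((const struct var *)entry)->name);
+ }
+
+ int main(void)
+ {
+   const struct var *found =
+     bsearch("num_retries", variables,
+             sizeof(variables)/sizeof(variables[0]),
+             sizeof(variables[0]), cmpvar);
+   printf("%s\n", found ? "found" : "not found");
+   return 0;
+ }
+ ```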
+
+ Closes #13914
+
+Stefan Eissing (10 Jun 2024)
+
+- multi: prepare multi_wait() for future shutdown usage
+
+ - new struct curl_pollfds and struct curl_waitfds
+ - add structs and methods to init/add/cleanup an array of pollfd and
+ struct curl_waitfd. Use in multi_wait() and multi_waitfds() to
+ populate the sets for polling.
+ - place USE_WINSOCK WSAEventSelect() setting into a separate loop over
+ all collected pfds
+
+ Closes #13900
+
+- connection: shutdown TLS (for FTP) better
+
+ This adds connection shutdown infrastructure and first use for FTP. FTP
+ data connections, when not encountering an error, are now shut down in a
+ blocking way with a 2sec timeout.
+
+ - add cfilter `Curl_cft_shutdown` callback
+ - keep a shutdown start timestamp and timeout at connectdata
+ - provide shutdown timeout default and member in
+ `data->set.shutdowntimeout`.
+ - provide methods for starting, interrogating and clearing
+ shutdown timers
+ - provide `Curl_conn_shutdown_blocking()` to shutdown the
+ `sockindex` filter chain in a blocking way. Use that in FTP.
+ - add `Curl_conn_cf_poll()` to wait for socket events during
+ shutdown of a connection filter chain.
+ This gets the monitoring sockets and events via the filters
+ "adjust_pollset()" methods. This gives correct behaviour when
+ shutting down a TLS connection through a HTTP/2 proxy.
+ - Implement shutdown for all socket filters
+ - for HTTP/2 and h2 proxying to send GOAWAY
+ - for TLS backends to the best of their capabilities
+ - for tcp socket filter to make a final, nonblocking
+ receive to avoid unwanted RST states
+ - add shutdown forwarding to happy eyeballers and
+ https connect ballers when applicable.
+
+ Closes #13904
+
+Daniel Stenberg (7 Jun 2024)
+
+- CURLOPT_CONNECTTIMEOUT*: clarify, document the millisecond version
+
+ Provide an explanation in the CURLOPT_CONNECTTIMEOUT_MS page instead of
+ just referring to the non-MS version.
+
+ Closes #13905
+
+- cmdline-opts: tidy up --ip-tos and --mptcp
+
+ To make them render nicer in the manpage and minor polish.
+
+ Closes #13906
+
+- RELEASE-NOTES: synced
+
+Dorian Craps (7 Jun 2024)
+
+- curl: (on linux) add MPTCP support
+
+ Multipath TCP (MPTCP), standardized in RFC8684 [1], is a TCP extension
+ that enables a TCP connection to use different paths.
+
+ Multipath TCP has been used for several use cases. On smartphones, MPTCP
+ enables seamless handovers between cellular and Wi-Fi networks while
+ preserving established connections. This use-case is what pushed Apple
+ to use MPTCP since 2013 in multiple applications [2]. On dual-stack
+ hosts, Multipath TCP enables the TCP connection to automatically use the
+ best performing path, either IPv4 or IPv6. If one path fails, MPTCP
+ automatically uses the other path.
+
+ To benefit from MPTCP, both the client and the server have to support
+ it. Multipath TCP is a backward-compatible TCP extension that is enabled
+ by default on recent Linux distributions (Debian, Ubuntu, Redhat, ...).
+ Multipath TCP is included in the Linux kernel since version 5.6 [3]. To
+ use it on Linux, an application must explicitly enable it when creating
+ the socket. No need to change anything else in the application.
+
+ This attached patch adds an --mptcp option which allows the creation of
+ an MPTCP socket instead of TCP on Linux. If Multipath TCP is not
+ supported on the system, an error will be reported. It is important to
+ note that if the end server doesn't support MPTCP, the connection will
+ continue after a seamless fallback to TCP.
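+
+ For reference, a stand-alone sketch of the socket creation this
+ enables; IPPROTO_MPTCP comes from recent Linux headers and is defined
+ here as a fallback with its upstream value:
+
+ ```c
+ #include <stdio.h>
+ #include <sys/socket.h>
+ #include <unistd.h>
+
+ #ifndef IPPROTO_MPTCP
+ #define IPPROTO_MPTCP 262  /* value from the Linux uapi headers */
+ #endif
+
+ int main(void)
+ {
+   int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
+   if(fd < 0) {
+     perror("MPTCP not supported here");
+     return 1;
+   }
+   /* connect()/send()/recv() work as with plain TCP; the kernel
+      falls back to TCP if the peer does not speak MPTCP */
+   close(fd);
+   return 0;
+ }
+ ```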
+
+ Link: https://www.rfc-editor.org/rfc/rfc8684.html [1]
+ Link: https://www.tessares.net/apples-mptcp-story-so-far/ [2]
+ Link: https://www.mptcp.dev [3]
+ Co-developed-by: Dorian Craps (@CrapsDorian) <doriancraps@gmail.com>
+ Co-developed-by: Olivier Bonaventure (@obonaventure) <Olivier.Bonaventure@ucl
+ ouvain.be>
+ Co-developed-by: Matthieu Baerts (@matttbe) <matttbe@kernel.org>
+ Signed-off-by: Dorian Craps <dorian.craps@student.vinci.be>
+
+ Closes #13278
+
+Orgad Shaneh (7 Jun 2024)
+
+- curl: support IP Type of Service / Traffic Class: --ip-tos
+
+ Add --ip-tos option to the command line tool for setting TOS for IPv4 or
+ Traffic Class for IPv6.
+
+ Closes #13606
+
+Andy Pan (7 Jun 2024)
+
+- socketpair: provide `Curl_socketpair` only when `!CURL_DISABLE_SOCKETPAIR`
+
+ Ref: https://curl.se/dev/log.cgi?id=20240605035856-3529577
+
+ Reported-by: Marcel Raad
+ Closes #13888
+
+Daniel Stenberg (7 Jun 2024)
+
+- noproxy: test bad ipv6 net size first
+
+ No need to parse anything if the size is out of range.
+
+ Added some tests to this effect to test 1614.
+
+ Closes #13902
+
+- managen: warn on excessively long help texts
+
+ Help texts at 49 characters or longer get a warning displayed because
+ they make --help output uglier and we should make an effort to keep the
+ help texts short and succinct.
+
+ The warning is only for display; it does not break the build. That is
+ left for the future if necessary.
+
+ I picked 49 because the longest current text is 48.
+
+ Closes #13895
+
+Viktor Szakats (5 Jun 2024)
+
+- lib: tidy up types and casts
+
+ Cherry-picked from #13489
+ Closes #13862
+
+Daniel Stenberg (5 Jun 2024)
+
+- cmdline-opts/ech.md: shorten the help text
+
+ To make --help look sensible again
+
+ Closes #13894
+
+- cmdline-opts/_PROTOCOLS.md: mention WS(S)
+
+ Closes #13891
+
+Viktor Szakats (5 Jun 2024)
+
+- GHA: disable TFTP and WebSockets tests in old-mingw-w64
+
+ Follow-up to 03bd16e5339b069aa9409b75fcab2b21fd3a4b16 #13860
+ Follow-up to def7d05382743ea7aa1d356d1e41dcb22ecdd4d7
+
+Daniel Stenberg (5 Jun 2024)
+
+- cmdline-opts/fail.md: expand and clarify
+
+ Closes #13890
+
+- doh-insecure.md: expand
+
+ Closes #13889
+
+- cmdline: expand proxy option explanations
+
+ - do less references to other options
+ - provide more specific text about proxies
+ - added more see-also references
+
+ Closes #13887
+
+- cmdline-opts: expand the parallel explanations
+
+ Closes #13886
+
+- RELEASE-NOTES: synced
+
+Stefan Eissing (5 Jun 2024)
+
+- vtls: new io_need flags for poll handling
+
+ - decouple need to recv/send from negotiation state, we need
+ this later in shutdown handling as well
+ - move ssl enums from urldata.h to vtls_int.h
+ - implement use of `connssl->io_need` in vtls.c. and all backends
+
+ Closes #13879
+
+Daniel Stenberg (5 Jun 2024)
+
+- cfilters: make Curl_conn_connect always assign 'done'
+
+ It could return error without assigning it, and we have a caller in
+ multi.c that assumes it gets set.
+
+ Spotted by CodeSonar
+ Closes #13884
+
+- CURLOPT_INTERFACE.md: quote the less-than and larger-than
+
+ Fixes the warnings shown on stderr.
+
+ Follow-up from 3060557af702dd5
+
+ Closes #13883
+
+- cmdline-opts/interface.md: expand the documentation
+
+ Explain the syntax it supports.
+
+ Closes #13882
+
+- url: allow DoH transfers to override max connection limit
+
+ When reaching the set maximum limit of allowed connections, allow a new
+ connection anyway if the transfer is created for the (internal) purpose
+ of doing a DoH name resolve. Otherwise, unrelated "normal" transfers can
+ starve out new DoH requests, making it impossible to name resolve for new
+ transfers.
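+
+ A hedged sketch of the kind of setup where this matters: a tightly
+ capped connection pool combined with DoH resolving. The limit value
+ and URLs are examples only:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURLM *multi = curl_multi_init();
+   CURL *easy = curl_easy_init();
+
+   /* small pool on purpose; DoH sub-transfers may now exceed it */
+   curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 2L);
+
+   curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
+   curl_easy_setopt(easy, CURLOPT_DOH_URL, "https://1.1.1.1/dns-query");
+   curl_multi_add_handle(multi, easy);
+
+   /* drive with curl_multi_perform()/curl_multi_poll() as usual */
+
+   curl_multi_remove_handle(multi, easy);
+   curl_easy_cleanup(easy);
+   curl_multi_cleanup(multi);
+   return 0;
+ }
+ ```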
+
+ Bug: https://curl.se/mail/lib-2024-06/0001.html
+ Reported-by: kartatz
+ Closes #13880
+
+Viktor Szakats (5 Jun 2024)
+
+- windows: fix UWP builds, add GHA job
+
+ Add new job to test building for UWP (aka `CURL_WINDOWS_APP`).
+
+ Fix fallouts when building for UWP:
+ - rand: do not use `BCryptGenRandom()`.
+ - cmake: disable using win32 LDAP.
+ - cmake: disable telnet.
+ - version_win32: fix code before declaration.
+ - schannel: disable `HAS_MANUAL_VERIFY_API`.
+ - schannel: disable `SSLSUPP_PINNEDPUBKEY`
+ and make `schannel_checksum()` a stub.
+ Ref: e178fbd40a896f2098278ae61e1166c88e7b31d0 #1429
+ - schannel: make `cert_get_name_string()` a failing stub.
+ - system_win32: make `Curl_win32_impersonating()` a failing stub.
+ - system_win32: try to fix `Curl_win32_init()` (untested).
+ - threads: fix to use `CreateThread()`.
+ - src: disable searching `PATH` for the CA bundle.
+ - src: disable bold text support and capability detection.
+ - src: disable `getfiletime()`/`setfiletime()`.
+ - tests: make `win32_load_system_library()` a failing stub.
+ - tests/server/util: make it compile.
+ - tests/server/sockfilt: make it compile.
+ - tests/lib3026: fix to use `CreateThread()`.
+
+ See individual commits for build error details.
+
+ Some of these fixes may have better solutions, and some may not work
+ as expected. The goal of this patch is to make curl build for UWP.
+
+ Closes #13870
+
+Orgad Shaneh (4 Jun 2024)
+
+- socket: support binding to interface *AND* IP
+
+ Introduce new notation for CURLOPT_INTERFACE / --interface:
+ ifhost!<interface>!<host>
+
+ Binding to an interface doesn't set the address, and an interface can
+ have multiple addresses.
+
+ When binding to an address (without interface), the kernel is free to
+ choose the route, and it can route through any device that can access
+ the target address, not necessarily the one with the chosen address.
+
+ Moreover, it is possible for different interfaces to have the same IP
+ address, in which case we need to provide a way to be more specific.
+
+ Factor out the parsing part of interface option, and add unit tests:
+ 1663.
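+
+ A minimal application sketch of the new notation; the interface name
+ and source address are made up for illustration:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     /* bind to this interface AND this source address */
+     curl_easy_setopt(curl, CURLOPT_INTERFACE,
+                      "ifhost!eth0!192.168.0.2");
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```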
+
+ Closes #13719
+
+Andy Pan (4 Jun 2024)
+
+- socketpair: add `eventfd` and use `SOCK_NONBLOCK` for `socketpair()`
+
+ Currently, we use `pipe` for `wakeup_create`, which requires ***two***
+ file descriptors. Furthermore, given its complexity inside, `pipe` is a
+ bit heavyweight for just a simple event wait/notify mechanism.
+
+ `eventfd` would be a more suitable solution for this kind of scenario,
+ kernel also advocates for developers to use `eventfd` instead of `pipe`
+ in some simple use cases:
+
+ Applications can use an eventfd file descriptor instead of a pipe
+ (see pipe(2)) in all cases where a pipe is used simply to signal
+ events. The kernel overhead of an eventfd file descriptor is much
+ lower than that of a pipe, and only one file descriptor is required
+ (versus the two required for a pipe).
+
+ This change adds the new backend of `eventfd` for `wakeup_create` and
+ uses it where available, eliminating the overhead of `pipe`. Also, it
+ optimizes the `wakeup_create` to eliminate the system calls that make
+ file descriptors non-blocking by moving the logic of setting
+ non-blocking flags on file descriptors to `socketpair.c` and using
+ `SOCK_NONBLOCK` for `socketpair(2)`, `EFD_NONBLOCK` for `eventfd(2)`.
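+
+ A Linux-only sketch of the two kernel APIs involved (not curl's
+ wakeup code itself): one eventfd descriptor versus a socketpair, both
+ created non-blocking in a single call:
+
+ ```c
+ #include <unistd.h>
+ #include <sys/eventfd.h>
+ #include <sys/socket.h>
+
+ int main(void)
+ {
+   /* one fd, non-blocking and close-on-exec from the start */
+   int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+
+   /* the pair-based alternative needs two fds; SOCK_NONBLOCK avoids
+      an extra fcntl()/ioctl() call per descriptor */
+   int sv[2];
+   int rc = socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sv);
+
+   if(efd >= 0)
+     close(efd);
+   if(!rc) {
+     close(sv[0]);
+     close(sv[1]);
+   }
+   return 0;
+ }
+ ```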
+
+ Ref:
+ https://man7.org/linux/man-pages/man7/pipe.7.html
+ https://man7.org/linux/man-pages/man2/eventfd.2.html
+ https://man7.org/linux/man-pages/man2/socketpair.2.html
+ https://www.gnu.org/software/gnulib/manual/html_node/eventfd.html
+
+ Closes #13874
+
+renovate[bot] (4 Jun 2024)
+
+- ci: update github/codeql-action digest to 2e230e8
+
+ Closes #13881
+
+Jay Satiro (4 Jun 2024)
+
+- examples/threaded-ssl: remove locking callback code
+
+ - Remove the locking callback code that demonstrates how to meet
+ requirements of threading libraries (mainly OpenSSL).
+
+ Locking callback code has not been needed for many years. According to
+ the documentation for OpenSSL and GnuTLS they are thread-safe by design,
+ assuming support for the underlying OS threading API is built-in.
+
+ Ref: https://github.com/curl/curl/pull/13850#issuecomment-2143538458
+
+ Closes https://github.com/curl/curl/pull/13851
+
+Viktor Szakats (4 Jun 2024)
+
+- tests: delete redundant `!MSDOS` guard
+
+ This fix was supposed to be committed earlier, but ended up missing from
+ the final commit.
+
+ Follow-up to e9a7d4a1c8377dbcf9a2d94365f60e3e5dff48f8 #12376
+ Closes #13878
+
+- lib: fix thread entry point to return `DWORD` on WinCE
+
+ We already do this in `tests/server/util.c`:
+ https://github.com/curl/curl/blob/97e5e37cc8269660bc5d4a1936f10f2390b97c5a/te
+ sts/server/util.c#L604-L606
+ and in `sockfilt.c`, `lib3026.c`.
+
+ Before this patch it returned `unsigned int`.
+
+ Closes #13877
+
+Andy Pan (4 Jun 2024)
+
+- socket: use SOCK_NONBLOCK to eliminate extra system call
+
+ Every time function `cf_socket_open()` is called to create a socket,
+ `curlx_nonblock()` is called to make that socket non-blocking. And
+ `curlx_nonblock()` will cost us 1 or 2 system calls (2 for `fcntl()`, 1
+ for `ioctl()`, etc.), meanwhile, tucking `SOCK_NONBLOCK` and
+ `SOCK_CLOEXEC` into the `type` argument for `socket()` is widely
+ supported across UNIX-like OS: Linux, *BSD, Solaris, etc. With that
+ ability, we can save 1 or 2 system calls on each socket.
+
+ Another change in this PR is to eliminate the redundant
+ `curlx_nonblock()` call on the socket in `cf_udp_setup_quic()` as that
+ socket created by `cf_socket_open()` is already non-blocking.
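+
+ The idea in isolation, as a sketch for platforms that support these
+ flags (Linux and most BSDs):
+
+ ```c
+ #include <sys/socket.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+   /* created non-blocking and close-on-exec in one system call,
+      instead of socket() followed by fcntl()/ioctl() */
+   int fd = socket(AF_INET,
+                   SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
+   if(fd >= 0)
+     close(fd);
+   return 0;
+ }
+ ```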
+
+ Ref:
+ https://man7.org/linux/man-pages/man2/socket.2.html
+ https://man.freebsd.org/cgi/man.cgi?socket(2)
+ https://man.dragonflybsd.org/?command=socket&section=2
+ https://man.netbsd.org/socket.2
+ https://man.openbsd.org/socket
+ https://docs.oracle.com/cd/E88353_01/html/E37843/socket-3c.html
+ https://illumos.org/man/3SOCKET/socket
+ ...
+
+ Closes #13855
+
+Viktor Szakats (4 Jun 2024)
+
+- GHA: show cmake error log in Windows and non-native workflows
+
+ CMake configure doesn't fail often, but when it does, it helps to see
+ its `CMakeFiles/CMakeConfigureLog.yaml` output. This file is present
+ since CMake v3.26:
+ https://cmake.org/cmake/help/v3.26/manual/cmake-configure-log.7.html
+
+ (Older CMake versions save similar content to
+ `CMakeFiles\CMakeOutput.log` and
+ `CMakeFiles\CMakeError.log`. This patch doesn't deal with that because
+ the workflows touched are all running a newer CMake.)
+
+ After this patch, we dump the content if cmake fails. Syncing this with
+ autotools, where we already did that.
+
+ Closes #13872
+
+- GHA: switch a Windows job to UCRT (gcc)
+
+ Cherry-picked from #13870
+
+- curl-config: revert to backticks to support old target envs
+
+ Make an exception for `curl-config` because this script that may be
+ running on any target system, including old ones, e.g. SunOS 5.10.
+
+ Reported-by: Alejandro R. Sedeño
+ Ref: https://github.com/curl/curl/pull/13307#issuecomment-2146427358
+ Follow-up to fa69b41c7790fab86fd363242c81d8ef2e89e183 #13307
+ Closes #13871
+
+Stefan Eissing (4 Jun 2024)
+
+- mbedtls: v3.6.0 workarounds
+
+ - add special sauce to disable unwanted peer verification by mbedtls
+ when negotiating TLS v1.3
+ - add special sauce for MBEDTLS_ERR_SSL_RECEIVED_NEW_SESSION_TICKET
+ return code on *writing* TLS data. We assume the data had not been
+ written and EAGAIN.
+ - return correct Curl error code when peer verification failed.
+ - disable test_08_05 with 50 HTTP/1.1 connections, as mbedtls reports a
+ memory allocation failed during handshake.
+ - bump CI mbedtls version to 3.6.0
+
+ Fixes #13653
+ Closes #13838
+
+- gnutls: support CA caching
+
+ - similar to openssl, use a shared 'credentials' instance
+ among TLS connections with a plain configuration.
+ - different to openssl, a connection with a client certificate
+ is not eligible for sharing.
+ - document CURLOPT_CA_CACHE_TIMEOUT in man page
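+
+ A hedged sketch of the CURLOPT_CA_CACHE_TIMEOUT knob mentioned above,
+ as documented for libcurl; the one-week value is just an example:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     /* keep the parsed CA store cached for a week (in seconds) */
+     curl_easy_setopt(curl, CURLOPT_CA_CACHE_TIMEOUT, 604800L);
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```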
+
+ Closes #13795
+
+Dan Fandrich (3 Jun 2024)
+
+- tests: don't log buffer length in throwing away message
+
+ It's not available at that point, and it will be written in the
+ non-error case right afterward.
+
+- tests: log "Throwing away" messages before throwing away
+
+ In case the read that follows hangs we'll get a clue as to what it was
+ doing.
+
+- CI: reduce memory request for FreeBSD builds
+
+ Also, add a comment with link to the Cirrus credit page since it's not
+ easy to find otherwise.
+
+Andy Pan (3 Jun 2024)
+
+- tcpkeepalive: support setting TCP keep-alive parameters on Solaris <11.4
+
+ Solaris did not support TCP_KEEPIDLE and TCP_KEEPINTVL until 11.4;
+ before that it used TCP_KEEPALIVE_THRESHOLD and
+ TCP_KEEPALIVE_ABORT_THRESHOLD as the substitutes. Therefore, for
+ Solaris <11.4 we need to use these substitutes for setting TCP
+ keep-alive parameters.
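+
+ A sketch of the older Solaris interface, where the thresholds are
+ given in milliseconds; this helper is illustrative only and not
+ curl's actual code (it only builds where these options exist):
+
+ ```c
+ #include <sys/socket.h>
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ /* Solaris <11.4: keep-alive thresholds are set in milliseconds */
+ static int keepalive_ms(int fd, int idle_sec, int abort_sec)
+ {
+   int on = 1;
+   int idle_ms = idle_sec * 1000;
+   int abort_ms = abort_sec * 1000;
+   if(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
+     return -1;
+   if(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD,
+                 &idle_ms, sizeof(idle_ms)))
+     return -1;
+   return setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD,
+                     &abort_ms, sizeof(abort_ms));
+ }
+ ```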
+
+ Ref:
+ https://docs.oracle.com/cd/E86824_01/html/E54777/tcp-7p.html
+ https://docs.oracle.com/cd/E88353_01/html/E37851/tcp-4p.html
+
+ Closes #13864
+
+Daniel Stenberg (3 Jun 2024)
+
+- KNOWN_BUGS: quiche: QUIC connection is draining
+
+ Closes #12037
+ Closes #13867
+
+- KNOWN_BUGS: aws-sigv4 has problems with particular URLs
+
+ Closes #13058
+ Closes #13866
+
+- KNOWN_BUGS: aws-sigv4 does not handle multipart/form-data correctly
+
+ Closes #13351
+ Closes #13866
+
+- RELEASE-NOTES: synced
+
+Viktor Szakats (3 Jun 2024)
+
+- GHA: fix old mingw-w64 32-bit job
+
+ This toolchain resides in the `mingw32` directory. Make sure to
+ configure `PATH` accordingly.
+
+ Before this patch, it pointed to a non-existing `mingw64` directory,
+ making the job use the wrong compiler (gcc 12, 64-bit).
+
+ Follow-up to e838b341a08b44d4a8486fb0d3f15d12fc794c62 #12927
+ Closes #13863
+
+Daniel Stenberg (2 Jun 2024)
+
+- tool_cb_hdr: return error for failed header writes
+
+ By checking that fflush() works.
+
+ Reported-by: Sebastian Andersson
+ Fixes #13836
+ Closes #13859
+
+Viktor Szakats (2 Jun 2024)
+
+- GHA: bump all build jobs to nproc+1
+
+ - bump rest of the workflows (windows, macos, distrocheck).
+
+ - non-native virtualized envs have 2 CPUs, bump down accordingly.
+ (for `vmactions/omnios-vm` it's just a guess.)
+
+ - bump all to nproc + 1.
+
+ Follow-up to e838b341a08b44d4a8486fb0d3f15d12fc794c62 #12927
+ Closes #13807
+
+- GHA: disable MQTT and WebSocket tests in Windows jobs
+
+ Trying to figure out which category is causing the remaining hangs.
+
+ Follow-up to def7d05382743ea7aa1d356d1e41dcb22ecdd4d7
+ Closes #13860
+
+- lib/v*: tidy up types and casts
+
+ Also add a couple of negative checks.
+
+ Cherry-picked from #13489
+ Closes #13622
+
+- GHA: fix caching old mingw-w64 toolchains in the Windows workflow
+
+ - stop altering the `PATH` via `GITHUB_ENV`. This confused the
+ `actions/cache` post-job, which needs to run in the exact same
+ environment as its pre-job, to have a consistent cache entry "version"
+ hash. Altering the `PATH` via `GITHUB_ENV` spills into the
+ post-job and breaks this hash. GHA doesn't reset the env automatically
+ and I have not found a way to do it manually.
+
+ - add double-quotes where missing.
+
+ - move cache directory under `USERPROFILE` to not rely on absolute
+ paths.
+
+ - make cache directory flatter and versionless.
+
+ Follow-up to 0914d8aadddac0d1459673d5b7f77e8f3378b22b #13759
+ Closes #13856
+
+renovate[bot] (2 Jun 2024)
+
+- ci: pin actions/github-script action to 60a0d83
+
+ Closes #13846
+
+Bo Anderson (2 Jun 2024)
+
+- x509asn1: add some common ECDSA OIDs
+
+ Closes #13857
+
+renovate[bot] (2 Jun 2024)
+
+- ci: update rojopolis/spellcheck-github-actions digest to e36f662
+
+ Closes #13852
+
+Bo Anderson (2 Jun 2024)
+
+- x509asn1: fallback to dotted OID representation
+
+ Reported-by: Luke Hamburg
+ Fixes #13845
+ Closes #13858
+
+Lee Li (2 Jun 2024)
+
+- request.md: language fix
+
+ Improved for better readability and correctness.
+
+ Closes #13854
+
+Christian Schmitz (2 Jun 2024)
+
+- vtls: deprioritize Secure Transport
+
+ Moved Secure Transport behind OpenSSL, so we can build curl with both
+ and prefer using OpenSSL over Secure Transport by default.
+
+ Closes #13547
+
+Daniel Stenberg (1 Jun 2024)
+
+- urlapi: add CURLU_NO_GUESS_SCHEME
+
+ Used for extracting:
+
+ - when used asking for a scheme, it will return CURLUE_NO_SCHEME if the
+ stored information was a guess
+
+ - when used asking for a URL, the URL is returned without a scheme, like
+ when previously given to the URL parser when it was asked to guess
+
+ - as soon as the scheme is set explicitly, it is no longer internally
+ marked as guessed
+
+ The idea being:
+
+ 1. allow a user to figure out if a URL's scheme was set as a result of
+ guessing
+
+ 2. extract the URL without a guessed scheme
+
+ 3. this makes it work similar to how we already deal with port numbers
+
+ Extend test 1560 to verify.
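+
+ A small usage sketch of points 1 and 2, based on the behavior
+ described above; the input URL is arbitrary:
+
+ ```c
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURLU *u = curl_url();
+   char *scheme = NULL;
+
+   /* parse with scheme guessing, as the curl tool does */
+   curl_url_set(u, CURLUPART_URL, "example.com/path",
+                CURLU_GUESS_SCHEME);
+
+   /* with the new flag, a guessed scheme is not returned */
+   if(curl_url_get(u, CURLUPART_SCHEME, &scheme,
+                   CURLU_NO_GUESS_SCHEME) == CURLUE_NO_SCHEME)
+     puts("scheme was only a guess");
+   curl_free(scheme);
+   curl_url_cleanup(u);
+   return 0;
+ }
+ ```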
+
+ Closes #13616
+
+- wolfssl: support CA caching
+
+ As a bonus, add SSLSUPP_CA_CACHE to let TLS backends signal its support
+ for this so that *setopt() return error if there is no support.
+
+ Closes #13786
+
+Andy Pan (1 Jun 2024)
+
+- socket: change TCP keepalive from ms to seconds on DragonFly BSD
+
+ DragonFly BSD changed the time unit for TCP keep-alive from
+ milliseconds to seconds in v5.8, so setting the keepalive options in
+ milliseconds with curl/libcurl results in unexpected behavior on
+ DragonFly BSD 5.8+.
+
+ Distinguish the DragonFly BSD versions and use the proper time units
+ accordingly.
+
+ Ref:
+ https://lists.dragonflybsd.org/pipermail/commits/2019-July/719125.html
+ https://github.com/DragonFlyBSD/DragonFlyBSD/blob/965b380e960908836b97aa034
+ fa2753091e0172e/sys/sys/param.h#L207
+
+ Fixes #13847
+ Closes #13848
+
+Daniel Stenberg (1 Jun 2024)
+
+- curlver.h: aiming for 8.9.0
+
+- noproxy: patterns need to be comma separated
+
+ or they will not parse correctly.
+
+ Mentioned in DEPRECATED since January 2023 (in 7ad8a7ba9ebdedc).
+
+ Closes #13789
+
+Jan Venekamp (1 Jun 2024)
+
+- sectransp: remove large cipher table
+
+ Previously a large table of ciphers was used to determine the default
+ ciphers and to lookup manually selected ciphers names.
+
+ With the lookup of the manually selected cipher names moved to
+ Curl_cipher_suite_walk_str() the large table is no longer needed for
+ that purpose.
+
+ The list of manually selected cipher can now be intersected with the
+ ciphers supported by Secure Transport (SSLGetSupportedCiphers()),
+ instead of using the fixed table for that.
+
+ The other use of the table was to filter the list of all supported
+ ciphers offered by Secure Transport to create a list of ciphers to
+ use by default, excluding ciphers in the table marked as weak.
+
+ Instead of using a complement based approach (exclude weak), switch
+ to using an intersection with a smaller list of ciphers deemed
+ appropriate.
+
+ Closes #13823
+
+Tatsuhiro Tsujikawa (1 Jun 2024)
+
+- GHA: unify http3 workflows into one
+
+ This commit unifies the following http3 workflows into http3-linux.yml:
+
+ - ngtcp2-linux.yml
+ - osslq-linux.yml
+ - quiche-linux.yml
+
+ The idea is better use of the build cache. Previously, they
+ independently created caches with the same key. Some of the caches
+ include source code and intermediate object files, which makes the cache
+ quite large. In this commit, only built artifacts are cached, which
+ drastically reduces the cache size. OpenSSL v3, mod_h2 and quiche caches
+ still include everything, but they are left for a later improvement.
+ Because the contents of the cache have been changed, the cache keys are
+ also changed to include the word "http3".
+
+ Closes #13841
+
+Stephen Farrell (1 Jun 2024)
+
+- openSSL: fix hostname handling when using ECH
+
+ Reported-by: vvb2060
+ Fixes #13818
+ Closes #13822
+
+renovate[bot] (1 Jun 2024)
+
+- ci: update github/codeql-action digest to f079b84
+
+ Closes #13837
+
+Daniel Stenberg (1 Jun 2024)
+
+- RELEASE-NOTES: synced
+
+- curl_multi_poll.md: expand the example with a custom file descriptor
+
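+ A hedged sketch of the pattern such an example typically shows:
+ waiting on libcurl's sockets plus one extra application descriptor,
+ here a hypothetical 'appfd' such as a pipe end:
+
+ ```c
+ #include <curl/curl.h>
+
+ static void wait_step(CURLM *multi, curl_socket_t appfd)
+ {
+   struct curl_waitfd extra;
+   int numfds = 0;
+
+   extra.fd = appfd;
+   extra.events = CURL_WAIT_POLLIN;
+   extra.revents = 0;
+
+   /* wake up on transfer activity, appfd readability or timeout */
+   curl_multi_poll(multi, &extra, 1, 1000, &numfds);
+
+   if(extra.revents & CURL_WAIT_POLLIN) {
+     /* application data is readable on appfd */
+   }
+ }
+ ```
+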
+ Closes #13842
+
+Christian Heusel (1 Jun 2024)
+
+- DISTROS: add a link to the list archive
+
+ Related to https://github.com/curl/curl/discussions/13833
+
+ Signed-off-by: Christian Heusel <christian@heusel.eu>
+ Closes #13843
+
+Matt Jolly (31 May 2024)
+
+- autoconf: remove 'deeper' checks for `AC_CHECK_FUNCS`
+
+ The net effect of the deeper checks is to trigger implicit function
+ declaration errors on modern compilers.
+
+ These checks appear to have been added ~20 years ago, relating to an
+ unverifiable claim about HP-UX. Autoconf support for the platform has
+ grown in leaps and bounds since.
+
+ It didn't cause a real problem here, but when investigating an FP this
+ came up. No evidence has been identified that this was actually broken
+ in the past, and there is no evidence that this is necessary now.
+
+ `-Werror=implicit-function-declarations` is enabled for both checks;
+ without a working prototype they will both fail regardless. In the
+ second case there will in fact never be a working prototype and
+ therefore it will always fail unconditionally.
+
+ `AC_CHECK_FUNCS` does effectively the same thing as the removed checks,
+ except it actually defines a dummy prototype to see if it links.
+
+ If `AC_CHECK_FUNCS` is broken on a given platform we have bigger
+ problems than trying to build cURL. This should also be faster.
+
+ Bug: https://bugs.gentoo.org/932827
+ Reviewed-By: Eli Schwartz <eschwartz93@gmail.com>
+ Closes #13830
+
+Jay Satiro (30 May 2024)
+
+- cf-socket: improve SO_SNDBUF update for Winsock
+
+ - Rename: Curl_sndbufset => Curl_sndbuf_init
+
+ - Rename: win_update_buffer_size => win_update_sndbuf_size
+
+ - Save the last set SO_SNDBUF size to compare against so that we can
+ avoid setsockopt calls every second.
+
+ This is a follow-up to 0b520e12 which moved the SO_SNDBUF update check
+ into cf-socket. This change improves it further by making the function
+ names easier to understand and reducing the number of setsockopt calls.
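+
+ For background, a Windows-only sketch of the kind of check-then-set
+ this implies; the helper and the target size are illustrative and not
+ the values libcurl chooses:
+
+ ```c
+ #include <winsock2.h>
+
+ static void grow_sndbuf(SOCKET s, int wanted)
+ {
+   int current = 0;
+   int len = sizeof(current);
+   /* only enlarge SO_SNDBUF when it is smaller than wanted, to
+      avoid a setsockopt() call on every send */
+   if(getsockopt(s, SOL_SOCKET, SO_SNDBUF,
+                 (char *)&current, &len) == 0 && current < wanted)
+     setsockopt(s, SOL_SOCKET, SO_SNDBUF,
+                (const char *)&wanted, sizeof(wanted));
+ }
+ ```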
+
+ Closes https://github.com/curl/curl/pull/13827
+
+Viktor Szakats (30 May 2024)
+
+- tidy-up: use consistent casing for Windows directories
+
+ C:\Windows\System32
+
+ Closes #13832
+
+- GHA: use ubuntu-latest with OmniOS job
+
+ It's the same as ubuntu-22.04.
+
+ Also update OmniOS package search link.
+
+ Closes #13831
+
+Ayesh Karunaratne (30 May 2024)
+
+- GHA: adjust parallel job counts
+
+ Adjusts the `make -j` flag to match the latest GitHub-hosted runner
+ hardware specs[^1]:
+
+ - `ubuntu-latest` on 4 CPU cores
+ - `macos-latest` on 3 CPU cores
+
+ The processor count is ideally obtained from `nproc`, but setting env
+ vars from the current CI yaml files is not possible because they expect
+ literal strings.
+
+ [^1]: https://docs.github.com/en/actions/using-github-hosted-runners/about-gi
+ thub-hosted-runners/about-github-hosted-runners#standard-github-hosted-runner
+ s-for-public-repositories
+
+ Closes #12927
+
+pszlazak (30 May 2024)
+
+- get.d: clarify the explanation
+
+ Closes #13706
+
+Daniel Stenberg (30 May 2024)
+
+- curl_url_set.md: libcurl only parses :// URLs
+
+ Make it clearer in the documentation.
+
+ Closes #13821
+
+Stefan Eissing (30 May 2024)
+
+- multi: fix multi_wait() timeout handling
+
+ - determine the actual poll timeout *after* all sockets
+ have been collected. Protocols and connection filters may
+ install new timeouts during collection.
+ - add debug logging to test1533 where the mistake was noticed
+
+ Reported-by: Matt Jolly
+ Fixes #13782
+ Closes #13825
+
+Viktor Szakats (29 May 2024)
+
+- lib: prefer `var = time(NULL)` over `time(&var)`
+
+ Following up on previous occurrences showing up as gcc warnings, replace
+ the remaining `time(&var)` calls with `var = time(NULL)`, though these
+ aren't specifically causing compiler warnings. These are in the TFTP
+ client code (`lib/tftp.c`), except one which is in a debug branch in
+ `lib/http_aws_sigv4.c`.
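+
+ The two forms side by side, as a trivial sketch:
+
+ ```c
+ #include <stdio.h>
+ #include <time.h>
+
+ int main(void)
+ {
+   time_t a, b;
+   time(&a);        /* the form being replaced */
+   b = time(NULL);  /* the preferred form, same result */
+   printf("%ld %ld\n", (long)a, (long)b);
+   return 0;
+ }
+ ```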
+
+ What's unexplainable is that this patch seems to mitigate TFTP tests
+ often hanging or going into an infinite loop on GHA windows workflows
+ with MSYS2, mingw-w64 and MSVC (Cygwin is unaffected):
+ https://github.com/curl/curl/pull/13599#issuecomment-2119372376
+ TFTP hangs did not entirely disappear though, so could be unrelated.
+
+ `time()` docs:
+ https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/time-time32
+ -time64
+ https://manpages.debian.org/bookworm/manpages-dev/time.2.en.html
+
+ Follow-up to 58ca0a2f0743a586716ca357c382b29e3f08db69 #13800
+ Follow-up to d0728c9109629ee82b855b350a4c3f1f52ee61df #13643
+ Closes #13815
+
+Stefan Eissing (29 May 2024)
+
+- winsock: move SO_SNDBUF update into cf-socket
+
+ - Move the code that updates the SO_SNDBUF size for Windows to
+ cf_socket_send.
+
+ Prior to this change the code was in readwrite_upload but the socket
+ filter is the more appropriate place because it applies to all sends.
+
+ Background:
+
+ For Windows users SO_SNDBUF (the total per-socket buffer size reserved
+ by Winsock for sends) is updated dynamically by libcurl during the
+ transfer. This is because Windows does not do it automatically for
+ non-blocking sockets and without it the performance of large transfers
+ may suffer.
+
+ Closes https://github.com/curl/curl/pull/13763
+
+Jan Venekamp (29 May 2024)
+
+- sectransp: use common code for cipher suite lookup
+
+ Take advantage of the Curl_cipher_suite_walk_str() and
+ Curl_cipher_suite_get_str() functions introduced in commit fba9afe.
+
+ Closes #13521
+
+Matthias Gatto (29 May 2024)
+
+- aws-sigv4: url encode the canonical path
+
+ Refactors canon_query so that the encoding part of the function can
+ also be used for the path.
+
+ As the path doesn't encode '/' but encodes '=', I had to add some
+ conditions to know if I was doing the query or path encoding.
+
+ Also, instead of adding a `bool in_path` variable, I use `bool
+ *found_equals` to know if the function was called for the query or path,
+ as found_equals is used only in query_encoding.
+
+ Test 472 verifies.
+
+ Reported-by: Alexander Shtuchkin
+ Fixes #13754
+ Closes #13814
+
+ Signed-off-by: Matthias Gatto <matthias.gatto@outscale.com>
+
+Daniel Stenberg (29 May 2024)
+
+- cd2nroff: use an empty "##" to signal end of .IP sequence
+
+ Like when we list a series of options and then want to add "normal" text
+ again afterwards.
+
+ Without this, the indentation level wrongly continues even after the
+ final "##" header, making following text wrongly appear to belong to the
+ header above.
+
+ Adjusted several curldown files to use this.
+
+ Fixes #13803
+ Reported-by: Jay Satiro
+ Closes #13806
+
+vvb2060 (28 May 2024)
+
+- openssl: fix %-specifier in infof() call
+
+ Closes #13816
+
+Daniel Stenberg (28 May 2024)
+
+- curl: make warnings and other messages aware of terminal width
+
+ This removes unnecessary line wraps when the terminal is wider than 79
+ columns and it also makes messages look better in narrower terminals.
+
+ The get_terminal_columns() function is now split out into its own
+ source file.
+
+ Suggested-by: Elliott Balsley
+ Fixes #13804
+ Closes #13808
+
+Viktor Szakats (28 May 2024)
+
+- GHA: enable tests 1139, 1177, 1477 on Windows
+
+ These exclusions came from the AppVeyor CI config, but they do pass now
+ and they are static tests with no flakiness risk.
+
+ Follow-up to 0914d8aadddac0d1459673d5b7f77e8f3378b22b #13759
+ Closes #13817
+
+Dan Fandrich (28 May 2024)
+
+- CI: Improve labeler tag detection
+
+ Also, simplify patterns with a single glob.
+
+Viktor Szakats (28 May 2024)
+
+- GHA: disable TFTP tests in Windows jobs
+
+ Shot in the dark trying to find out which tests are
+ hanging / going to an infinite loop.
+
+ The ones failing after 45 minutes (mingw-w64) or 30 minutes (MSVC).
+
+ Ref: https://github.com/curl/curl/pull/13599#issuecomment-2119372376
+
+renovate[bot] (28 May 2024)
+
+- ci: update vmactions/omnios-vm digest to a61ca1e
+
+ Closes #13801
+
+Daniel Stenberg (28 May 2024)
+
+- openssl/gnutls: rectify the TLS version checks for QUIC
+
+ The version check wrongly complained and returned an error if the
+ *minimum* version was set to something less than 1.3. QUIC is always
+ TLS 1.3, but that means minimum 1.2 is still fine to ask for.
+
+ This also renames the local variable to make the mistake harder to make
+ in the future.
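+
+ A hedged application sketch of the configuration this fix accepts
+ again: HTTP/3 with a minimum TLS version of 1.2; the URL is just an
+ example:
+
+ ```c
+ #include <curl/curl.h>
+
+ int main(void)
+ {
+   CURL *curl = curl_easy_init();
+   if(curl) {
+     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
+     curl_easy_setopt(curl, CURLOPT_HTTP_VERSION,
+                      (long)CURL_HTTP_VERSION_3);
+     /* minimum 1.2 is accepted; QUIC still negotiates TLS 1.3 */
+     curl_easy_setopt(curl, CURLOPT_SSLVERSION,
+                      (long)CURL_SSLVERSION_TLSv1_2);
+     curl_easy_perform(curl);
+     curl_easy_cleanup(curl);
+   }
+   return 0;
+ }
+ ```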
+
+ Regression shipped in 8.8.0
+
+ Follow-up to 3210101088dfa3d6a125
+
+ Reported-by: fds242 on github
+ Fixes #13799
+ Closes #13802
+
+Stefan Eissing (28 May 2024)
+
+- gnutls: improve TLS shutdown
+
+ Local ftp upload tests sometimes failed with an invalid TLS record
+ being reported by gnutls. vsftpd did log that the shutdown was not
+ regarded as clean, failing the control connection thereafter.
+
+ These changes make test_31_05 work reliable locally.
+
+ - on closing the SSL filter, shutdown READ *and* WRITE
+ - on closing, try a receive after shutdown is sent
+ - convert DEBUGF to CURL_TRC_CF
+
+ Closes #13790
+
+Daniel Stenberg (28 May 2024)
+
+- RELEASE-NOTES: synced
+
+- tests: run with "--trace-config all" to provide even more info
+
+ in case of problems.
+
+ Closes #13791
+
+Viktor Szakats (28 May 2024)
+
+- build: untangle `CURLDEBUG` and `DEBUGBUILD` macros
+
+ `CURLDEBUG` is meant to enable memory tracking, but in a bunch of cases,
+ it was protecting debug features that were supposed to be guarded with
+ `DEBUGBUILD`.
+
+ Replace these uses with `DEBUGBUILD`.
+
+ This leaves `CURLDEBUG` uses solely for its intended purpose: to enable
+ the memory tracking debug feature.
+
+ Also:
+ - autotools: rely on `DEBUGBUILD` to enable `checksrc`.
+ Instead of `CURLDEBUG`, which worked in most cases because debug
+ builds enable `CURLDEBUG` by default, but it's not accurate.
+ - include `lib/easyif.h` instead of keeping a copy of a declaration.
+ - add CI test jobs for the build issues discovered.
+
+ Ref: https://github.com/curl/curl/pull/13694#issuecomment-2120311894
+ Closes #13718
+
+- examples: delete unused includes
+
+ Delete a bunch of unnecessary-looking headers from some examples. This
+ is known to be tricky on AIX (perhaps also in other less-tested envs).
+
+ Let me know if any of this looks incorrect or outright fails on some
+ systems.
+
+ Follow-up to d4b85890555388bec212b75f47a5c1a48705b156 #13771
+ Closes #13785
+
+- appveyor: fixup job name [ci skip]
+
+ Follow-up to fc8e0dee3045658f293452121f5290d81ba3aa1e #13694
+
+- cmake: fix `-Wredundant-decls` in unity/mingw-w64/gcc/curldebug/DLL builds
+
+ It affected cmake-unity shared-curltool curldebug mingw-w64 gcc builds
+ when building the `testdeps` target.
+
+ Apply the solution already used in `lib/base64.c` and `lib/dynbuf.c`
+ to fix it.
+
+ Also update an existing GHA CI job to test the issue fixed.
+
+ ```
+ In file included from curl/lib/version_win32.c:35,
+ from curl/_bld/src/CMakeFiles/curl.dir/Unity/unity_0_c.c:145
+ :
+ curl/lib/memdebug.h:52:14: error: redundant redeclaration of 'curl_dbg_logfil
+ e' [-Werror=redundant-decls]
+ 52 | extern FILE *curl_dbg_logfile;
+ | ^~~~~~~~~~~~~~~~
+ In file included from curl/src/slist_wc.c:32,
+ from curl/_bld/src/CMakeFiles/curl.dir/Unity/unity_0_c.c:4:
+ curl/lib/memdebug.h:52:14: note: previous declaration of 'curl_dbg_logfile' w
+ ith type 'FILE *' {aka 'struct _iobuf *'}
+ 52 | extern FILE *curl_dbg_logfile;
+ | ^~~~~~~~~~~~~~~~
+ curl/lib/memdebug.h:55:44: error: redundant redeclaration of 'curl_dbg_malloc
+ ' [-Werror=redundant-decls]
+ 55 | CURL_EXTERN ALLOC_FUNC ALLOC_SIZE(1) void *curl_dbg_malloc(size_t siz
+ e,
+ | ^~~~~~~~~~~~~~~
+ curl/lib/memdebug.h:55:44: note: previous declaration of 'curl_dbg_malloc' wi
+ th type 'void *(size_t, int, const char *)' {aka 'void *(long long unsigned
+ int, int, const char *)'}
+ 55 | CURL_EXTERN ALLOC_FUNC ALLOC_SIZE(1) void *curl_dbg_malloc(size_t siz
+ e,
+ | ^~~~~~~~~~~~~~~
+ [...]
+ curl/lib/memdebug.h:110:17: error: redundant redeclaration of 'curl_dbg_fclos
+ e' [-Werror=redundant-decls]
+ 110 | CURL_EXTERN int curl_dbg_fclose(FILE *file, int line, const char *sou
+ rce);
+ | ^~~~~~~~~~~~~~~
+ curl/lib/memdebug.h:110:17: note: previous declaration of 'curl_dbg_fclose' w
+ ith type 'int(FILE *, int, const char *)' {aka 'int(struct _iobuf *, int, c
+ onst char *)'}
+ 110 | CURL_EXTERN int curl_dbg_fclose(FILE *file, int line, const char *sou
+ rce);
+ | ^~~~~~~~~~~~~~~
+ ```
+ Ref: https://ci.appveyor.com/project/curlorg/curl/builds/49840554/job/a4aoet1
+ 7e9qnqx1a#L362
+
+ After: https://ci.appveyor.com/project/curlorg/curl/builds/49843735/job/hbo2u
+ ah2vj0ns523
+
+ Ref: #13689 (CI testing this PR with `DEBUGBUILD`/`CURLDEBUG`/shared-static c
+ ombinations)
+ Depends-on: #13694
+ Depends-on: #13800
+ Closes #13705
+
+- lib: fix gcc warning in certain debug builds
+
+ ```
+ curl/lib/http_aws_sigv4.c:536:10: error: 'clock' may be used uninitialized [-
+ Werror=maybe-uninitialized]
+ 536 | time_t clock;
+ | ^~~~~
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9158755123/job/25177765000#ste
+ p:13:79
+
+ Cherry-picked from #13718
+ Closes #13800
+
+- cmake: always build unit tests with the `testdeps` target
+
+ Before this patch, the `testdeps` build target required `-DCURLDEBUG`
+ be set either via `ENABLE_DEBUG=ON` or `ENABLE_CURLDEBUG=ON` to build
+ the curl unit tests.
+
+ After fixing build issues in #13694, we can drop this requirement and
+ build unit tests unconditionally.
+
+ Depends-on: #13694
+ Depends-on: #13697 (fix unit test issue revealed by Old Linux CI job)
+ Follow-up to 39e7c22bb459c2e818f079984989a26a09741860 #11446
+ Closes #13698
+
+- CI: disable dependency tracking in most autotools builds
+
+ For better build performance. Dependency tracking causes a build
+ overhead while compiling to help a subsequent build, but in CI there is
+ never one and the extra work is discarded.
+
+ Closes #13794
+
+- build: untangle `UNITTESTS` and `DEBUGBUILD` macros
+
+ - fix `DEBUGBUILD` guards that should be `UNITTESTS`, in libcurl code
+ used by unit tests.
+ - fix guards for libcurl functions used in unit tests only.
+ - sync `UNITTEST` attribute between declarations and definitions.
+ - drop `DEBUGBUILD` guard from test `unit2600`.
+ - fix guards for libcurl HSTS code used by both a unit test (`unit1660`)
+ and `test0446`.
+ - update an existing AppVeyor CI job to test the issues fixed.
+
+ This fixes building tests with `CURLDEBUG` enabled but `DEBUGBUILD`
+ disabled. This can happen when building tests with CMake with
+ `ENABLE_DEBUG=ON` in Release config, or with `ENABLE_CURLDEBUG=ON`
+ and _without_ `ENABLE_DEBUG=ON`. Possibly also with autotools
+ when using `--enable-curldebug` without `--enable-debug`.
+
+ Test results:
+ - before:
+ https://ci.appveyor.com/project/curlorg/curl/builds/49835609
+ https://ci.appveyor.com/project/curlorg/curl/builds/49898529/job/k8qpbs8idb
+ y70smw
+ https://github.com/curl/curl/actions/runs/9259078835/job/25470318167?pr=137
+ 98#step:13:821
+ - after: https://ci.appveyor.com/project/curlorg/curl/builds/49839255
+ (the two failures are unrelated, subject to PR #13705)
+
+ Ref: #13592 (issue discovery)
+ Ref: #13689 (CI testing this PR with `DEBUGBUILD`/`CURLDEBUG` combinations)
+ Closes #13694
+
+- GHA: ignore flaky MQTT and FTP test results [ci skip]
+
+ MQTT / OmniOS:
+ ```
+ TESTFAIL: These test cases failed: 1190 1198 3017
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9258522297/job/25468730731?pr=
+ 13694#step:3:10251
+
+ MQTT / OmniOS:
+ ```
+ TESTFAIL: These test cases failed: 1194 2200 2203 2205
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9150523540/job/25155409832#ste
+ p:3:10233
+
+ FTP / OmniOS:
+ ```
+ TESTFAIL: These test cases failed: 1096
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9150702711/job/25155793948#ste
+ p:3:10247
+
+ FTP / OmniOS:
+ ```
+ TESTFAIL: These test cases failed: 381
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9163863822/job/25193897640#ste
+ p:3:10230
+
+ FTP / OmniOS:
+ ```
+ TESTFAIL: These test cases failed: 340
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9233804752/job/25406671742?pr=
+ 13771#step:3:10245
+
+ Ref: https://github.com/curl/curl/pull/13583#issuecomment-2119376898
+
+- CI: tidy up skipping tests build/run in Windows jobs
+
+ Simplify controlling whether to build and/run tests in a CI job.
+
+ Apply the TFLAGS='skipall' (do not build nor run tests) or
+ 'skiprun' (build, but do not run) method already used with old-mingw-w64
+ and msvc jobs to existing Windows jobs in GHA and AppVeyor.
+
+ Also:
+ - add Cygwin/cmake test build and run steps while here.
+ - replace `DISABLED_TESTS` with `TFLAGS` in AppVeyor.
+
+ Closes #13796
+
+- cmake: use `APPLE` instead of `CMAKE_SYSTEM_NAME` string
+
+ Follow-up to a86254b39307af1a53735b065a382567805cd9b8 #12515
+ Closes #13713
+
+- cmake: whitespace, formatting/tidy-up in comments
+
+ Also correct casing in a few option descriptions.
+
+ Closes #13711
+
+- cmake: allow `ENABLE_CURLDEBUG=OFF` with `ENABLE_DEBUG=ON`
+
+ Before this patch, `ENABLE_CURLDEBUG` (memory tracking) was
+ unconditionally enabled when `ENABLE_DEBUGBUILD` was set. This made
+ testing some build configurations complicated. To fix it, this patch
+ makes `ENABLE_CURLDEBUG` receive the value of `ENABLE_DEBUG` by
+ default, while allowing the user to freely override it.
+
+ This allows using the config:
+ `ENABLE_DEBUGBUILD=ON ENABLE_CURLDEBUG=OFF`
+ to enable debug features, without also enabling memory tracking.
+
+ This is important because some other build methods allow setting one of
+ these features but not the other. This patch allows testing any
+ combination with CMake.
+
+ This makes it unnecessary to use the workaround of passing
+ `-DDEBUGBUILD` via `CMAKE_C_FLAGS`, which has the disadvantage that our
+ CMake logic cannot easily detect it, e.g. for disabling symbol hiding on
+ Windows for `ENABLE_DEBUG`/`DEBUGBUILD` builds.
+
+ Cherry-picked from #13718
+ Closes #13792
+
+- cmake: `ENABLE_DEBUG=ON` to always set `-DDEBUGBUILD`
+
+ Before this patch `ENABLE_DEBUG=ON` always enabled the TrackMemory
+ (aka `ENABLE_CURLDEBUG=ON`) feature, but required the `Debug` CMake
+ configuration to actually enable curl debug features
+ (aka `-DDEBUGBUILD`).
+
+ Curl debug features do not require compiling with C debug options. This
+ also made enabling debug features unintuitive and complicated to use.
+ Due to other issues (subject to PR #13694) it also caused an error in
+ default (and `Release`/`MinSizeRel`/`RelWithDebInfo`) configs, when
+ building the `testdeps` target:
+ ```
+ ld: CMakeFiles/unit1395.dir/unit1395.c.o: in function `test':
+ unit1395.c:(.text+0x1a0): undefined reference to `dedotdotify'
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9037287098/job/24835990826#step:3:2483
+
+ Fix it by always defining `DEBUGBUILD` when setting `ENABLE_DEBUG=ON`,
+ decoupling this option from the selected CMake configuration.
+
+ Note that after this patch `ENABLE_DEBUG=ON` unconditionally enables
+ curl debug features. These features are insecure and unsuited for
+ production. Make sure to omit this option when building for production
+ in default, `Release` (and other not-`Debug`) modes.
+
+ Also delete a workaround no longer necessary in GHA CI jobs.
+
+ Ref: 1a62b6e68c08c7e471ff22dd92932aba7e026817 (2015-03-03)
+ Ref: #13583
+ Closes #13592
+
+- GHA: add autotools mingw-64, build-only job
+
+ Cherry-picked from #13718
+ Closes #13793
+
+- GHA: add three MSVC jobs
+
+ Continuing the theme, add 3 MSVC jobs with tests, matching
+ configurations used on AppVeyor. MSVC versions are identical:
+ 19.39.33523.0 + Windows SDK 10.0.22621.0.
+
+ Also enable websockets, and build examples. Tests are run in parallel
+ (`-j14`), with improved performance.
+
+ Job performance:
+ ```
+                                                          AppVeyor  GHA w/examples
+                                                          --------  --------------
+ CMake, VS2022, Debug, x64, Schannel, Static, Unicode       38m 4s          11m57s
+ CMake, VS2022, Debug, x64, no SSL, Static                  35m15s          12m 6s
+ CMake, VS2022, Debug, x64, no SSL, Static, HTTP only       25m25s          10m36s
+ ```
+ Based on these runs:
+ https://ci.appveyor.com/project/curlorg/curl/builds/49884748
+ https://github.com/curl/curl/actions/runs/9229448468
+
+ This is the first time examples are built in CI with MSVC: Fix all
+ warnings and errors that came up via
+ d4b85890555388bec212b75f47a5c1a48705b156 #13771.
+
+ Closes #13766
+
+- GHA: add three old (gcc 6, 7, 9) mingw-w64 jobs
+
+ Re-implement old mingw-w64 jobs in GHA. This allows using the latest
+ Windows runners, replacing Windows Server 2012 R2 (gcc 6) and Windows
+ Server 2016 (gcc 7, 9) with Windows Server 2022.
+
+ GHA runners are also significantly faster, and allow running tests in
+ parallel (`-j14`). It also offloads 3 more long-running jobs from
+ AppVeyor CI.
+
+ These jobs download (then cache) the mingw-w64 packages from their
+ original location, which allows flexibility in choosing which versions
+ and flavours (win32/POSIX, SEH/DWARF, 64/32-bit) we want to test in CI.
+ The new jobs use these distros:
+  - https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/ (for gcc 7, same as on AppVeyor)
+  - https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/ (for gcc 6, same as on AppVeyor)
+ - https://winlibs.com/ (for gcc 9)
+
+ I matched existing AppVeyor job configs, with these differences:
+ - gcc 6.4.0 instead of 6.3.0.
+ (same distro as on AppVeyor, but the latest bugfix release)
+ - gcc 9.5.0 instead of 9.1.0 and a different (but compatible) binary distro.
+ (in AppVeyor this relies on an old MSYS2 pre-installed on the runner)
+ - using win32 builds instead of posix for gcc 6.4.0 and 7.3.0.
+ - websockets enabled.
+ - always build examples.
+ - always build tests (this wasn't done for 6.4.0 with AppVeyor CI).
+
+ I did not replicate existing test exclusions, and oddly enough the few
+ failures (so far) were different from MSYS2 jobs and also from their
+ AppVeyor CI counterparts.
+
+ Also:
+ - delete redundant (default) `-u` option from `cygpath` calls.
+ - allow matrix options to override default ones in CMake.
+ - detect and use Windows-supplied curl for `TFLAGS` `-ac` option.
+ (it's available in modern runners.)
+ - delete the 3 AppVeyor CI jobs now replicated in GHA.
+ - appveyor: prefer `SYSTEMROOT` over `WINDIR`.
+ - tidy-up quotes.
+
+ Job performance:
+ ```
+                                                                   AppVeyor  GHA w/examples w/tests
+                                                                   --------  ----------------------
+ CMake, mingw-w64, gcc 6, Debug, x86, Schannel, Static, no-unity      1m25s                   8m50s
+ CMake, mingw-w64, gcc 7, Debug, x64, Schannel, Static, Unicode      31m45s                   9m39s
+ CMake, mingw-w64, gcc 9, Debug, x64, Schannel, Static               28m25s                  13m38s
+ ```
+ Based on these runs:
+ https://ci.appveyor.com/project/curlorg/curl/builds/49880799
+ https://github.com/curl/curl/actions/runs/9218292508
+
+ Notice that building examples and tests is time consuming.
+
+ We can tweak any build parameter as necessary to make these jobs more
+ useful, without clogging the job queue or introducing flakiness.
+
+ Closes #13759
+
+Daniel Stenberg (27 May 2024)
+
+- TODO: remove some old, clarify, add something
+
+ Closes #13788
+
+- TODO: Add "Share CA cache" + "CA caching to more TLS backends"
+
+ Closes #13787
+
+Viktor Szakats (26 May 2024)
+
+- runtests: sort test IDs in summary lines
+
+ Changing this output:
+ ```
+ TESTFAIL: These test cases failed: 2301 2303 2302 2307
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9228638364/job/25393106631#step:6:21181
+
+ To:
+ ```
+ TESTFAIL: These test cases failed: 2301 2302 2303 2307
+ ```
+
+ Cherry-picked from #13766
+ Closes #13774
+
+- examples: fix compiling with MSVC
+
+ - `websocket.c`: use `Sleep()` on Windows.
+ `sleep()` and `unistd.h` are not available in MSVC.
+
+ - `http2-upload.c`: use local `gettimeofday()` implementation when
+ compiled with MSVC.
+ (Alternate solution is to disable the trace function for MSVC.)
+ Public domain code copied and adapted from libssh2:
+ https://github.com/libssh2/libssh2/blob/e973493f992313b3be73f51d3f7ca6d52e288558/src/misc.c#L719-L743
+
+ - silence compiler warning for deprecated `inet_addr()`.
+ Also drop duplicate winsock2 include.
+ ```
+ curl\docs\examples\externalsocket.c(125,32): error C2220: the following warning is treated as an error [curl\bld\docs\examples\curl-example-externalsocket.vcxproj]
+ curl\docs\examples\externalsocket.c(125,32): warning C4996: 'inet_addr': Use inet_pton() or InetPton() instead or define _WINSOCK_DEPRECATED_NO_WARNINGS to disable deprecated API warnings [curl\bld\docs\examples\curl-example-e
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9227337318/job/25389073450#step:4:95
+
+ - silence an MSVC compiler warning. This is in conflict with `checksrc`
+ rules, so silence the rule in favour of the warning-free C syntax.
+ ```
+ curl\docs\examples\multi-legacy.c(152,1): error C2220: the following warning is treated as an error [curl\bld\docs\examples\curl-example-multi-legacy.vcxproj]
+ curl\docs\examples\multi-legacy.c(152,1): warning C4706: assignment within conditional expression [curl\bld\docs\examples\curl-example-multi-legacy.vcxproj]
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9227337318/job/25389073450#step:4:226
+
+ - do not use `sys/time.h` and `unistd.h` in Windows builds.
+ Some of these includes look unnecessary. Subject to another PR.
+
+ Cherry-picked from #13766
+ Closes #13771
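+
+ As an illustration of the portability gap this addresses, a minimal sketch
+ (not the exact change applied to the examples) of the Sleep()/sleep()
+ split that MSVC builds need:
+ ```
+ #ifdef _WIN32
+ #include <windows.h>
+ #else
+ #include <unistd.h>
+ #endif
+
+ /* wait roughly one second: MSVC provides neither sleep() nor unistd.h,
+    so Windows builds use the Win32 Sleep(), which takes milliseconds */
+ static void wait_a_second(void)
+ {
+ #ifdef _WIN32
+   Sleep(1000);   /* milliseconds */
+ #else
+   sleep(1);      /* seconds */
+ #endif
+ }
+ ```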
+
+Jonathan Matthews (26 May 2024)
+
+- docs/cmdline-opts: fix mail-auth example TLD typo
+
+ Closes: #13784
+ Reviewed-by: Daniel Gustafsson <daniel@yesql.se>
+
+Daniel Stenberg (26 May 2024)
+
+- libssh: remove CURLOPT_SSL_VERIFYHOST check
+
+ It was never meant for SSH: it should rely on the knownhosts file (if
+ set) in the same way libssh2 already does.
+
+ Reported-by: James Abbatiello
+ Fixes #13767
+ Closes #13781
+
+Stefan Eissing (26 May 2024)
+
+- multi: add multi->proto_hash, a key-value store for protocol data
+
+ - add `Curl_hash_add2()` that passes a destructor function for
+ the element added. Call element destructor instead of hash
+ destructor if present.
+ - multi: add `proto_hash` for protocol related information,
+ remove `struct multi_ssl_backend_data`.
+ - openssl: use multi->proto_hash to keep x509 shared store
+ - schannel: use multi->proto_hash to keep x509 shared store
+ - vtls: remove Curl_free_multi_ssl_backend_data() and its
+ equivalents in the TLS backends
+
+ Closes #13345
+
+Jan Venekamp (25 May 2024)
+
+- tests: add pytest for --ciphers and --tls13-ciphers options
+
+ Closes #13530
+
+Orgad Shaneh (25 May 2024)
+
+- tool_operate: avoid explicitly setting verifypeer to 1
+
+ Also for the proxy version. It is the default, just like verifyhost,
+ and has been for a long time.
+
+ Closes #13704
+
+- tests: extend user/password parsing test1620
+
+ Closes #13756
+
+Alejandro R. Sedeño (25 May 2024)
+
+- configure: use `$EGREP` in place of `grep -E`
+
+ `$EGREP` is set based on an earlier test in configure so that we can
+ work with systems that have `egrep` and a `grep` that does not support
+ `-E`.
+
+ Closes #13780
+
+renovate[bot] (25 May 2024)
+
+- ci: update dependency awslabs/aws-lc to v1.28.0
+
+ Closes #13770
+
+Jan Venekamp (25 May 2024)
+
+- tests: test_17_ssl_use.py clarify mbedtls TLSv1.3 support
+
+ Closes #13779
+
+Stefan Eissing (25 May 2024)
+
+- http: write last header line late
+
+  - HEADERFUNCTION callbacks might inspect response properties like
+ CURLINFO_CONTENT_LENGTH_DOWNLOAD_T on seeing the last header line. If
+ the line is written before these are initialized, the values are not
+ available.
+
+  - write the last header line late when analyzing an HTTP response so that
+ all information is available at the time of writing.
+
+ - add test1485 to verify that CURLINFO_CONTENT_LENGTH_DOWNLOAD_T works
+ on seeing the last header.
+
+ Fixes #13752
+ Reported-by: Harry Sintonen
+ Closes #13757
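+
+ A minimal sketch of the usage pattern this makes reliable (the callback and
+ wiring are illustrative, not taken from test1485): query
+ CURLINFO_CONTENT_LENGTH_DOWNLOAD_T when the terminating blank header line
+ arrives, assuming CURLOPT_HEADERFUNCTION points at this callback and
+ CURLOPT_HEADERDATA at the easy handle:
+ ```
+ #include <stdio.h>
+ #include <curl/curl.h>
+
+ static size_t on_header(char *buf, size_t size, size_t nitems, void *userdata)
+ {
+   CURL *curl = userdata;
+   size_t len = size * nitems;
+   if(len == 2 && buf[0] == '\r' && buf[1] == '\n') {
+     /* last header line: the content length should be known by now */
+     curl_off_t clen = -1;
+     curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &clen);
+     fprintf(stderr, "content-length: %" CURL_FORMAT_CURL_OFF_T "\n", clen);
+   }
+   return len;
+ }
+ ```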
+
+Dan Fandrich (24 May 2024)
+
+- tests: use exec when spawning nghttpx
+
+ This stops keeping perl and shell processes around that are no longer
+ needed, plus it eliminates an unneeded shell message when the server is
+ later terminated.
+
+ Closes #13772
+
+Viktor Szakats (24 May 2024)
+
+- GHA: ignore flaky test 3017 (MQTT) on OpenBSD
+
+ ```
+ TESTFAIL: These test cases failed: 3017
+ ```
+ Ref: https://github.com/curl/curl/actions/runs/9223543272/job/25376999226?pr=13759#step:3:16326
+ Ref: https://github.com/curl/curl/actions/runs/9230183764/job/25397883193?pr=13766#step:3:16345
+
+ Ref: https://github.com/curl/curl/pull/13583#issuecomment-2119376898
+
+Joseph Chen (24 May 2024)
+
+- build: add more supported attributes to the IAR compiler
+
+ Closes #13744
+
+Viktor Szakats (24 May 2024)
+
+- cmake: fix test 1013 with websockets enabled and no TLS
+
+ test 1013 is 'Compare curl --version with curl-config --protocols'.
+
+ Ref: https://github.com/curl/curl/actions/runs/9228363859/job/25392251955
+
+ Closes #13769
+
+- GHA: stop deleting curl in non-native workflows
+
+ We do it in Cirrus CI, but for some platforms it's not possible to
+ delete it and tests work anyway.
+
+ The test runner also runs `../src/curl` by default, which is always the
+ one freshly built. The runner may also need the system curl to talk to
+ APIs when needed.
+
+ Also:
+ - stop setting `CURL` env. This isn't picked up by the runners,
+ and works out of the box anyway.
+ - quote an option just in case.
+
+ Follow-up to 90e644f944969bb11c6448bf50c6d441b5c0b1e6 #13583
+ Closes #13765
+
+Jay Satiro (24 May 2024)
+
+- openssl: stop duplicate ssl key logging for legacy OpenSSL
+
+ - Don't call the keylog function if it has already logged the key.
+
+ For old OpenSSL versions and its forks that do not have support for
+ OpenSSL's keylog callback, libcurl has its own legacy key logging
+ function that logs the TLS 1.2 (and earlier) key (client random + master
+ key) on a single line.
+
+ Prior to this change, since e7de80e8 (precedes 8.8.0), the legacy key
+ logging function could write the same key line more than once (usually
+ twice) due to some incorrect logic.
+
+ Closes https://github.com/curl/curl/pull/13683
+
+Stefan Eissing (24 May 2024)
+
+- transfer: remove curl_upload_refill_watermark, no longer used
+
+ the define applied to upload buffers which we removed
+
+ Closes #13764
+
+Daniel Stenberg (24 May 2024)
+
+- RELEASE-NOTES: synced
+
+Viktor Szakats (24 May 2024)
+
+- cmake: fix brotli lib order
+
+ Fix the root cause of missing symbols when linking brotli
+ statically with e.g. binutils `ld` (and any other "picky" linker,
+ or "traditional" linker as CMake now calls them).
+
+ Also drop existing workaround that added brotli libs twice to the lib
+ list.
+
+ ```
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(decode.c.obj):decode.c:(.text$ProcessCommands[ProcessCommands]+0xbb5): undefined reference to `BrotliTransformDictionaryWord'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(decode.c.obj):decode.c:(.text$SafeProcessCommands[SafeProcessCommands]+0xe8a): undefined reference to `BrotliTransformDictionaryWord'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(decode.c.obj):decode.c:(.rdata$.refptr._kBrotliContextLookupTable[.refptr._kBrotliContextLookupTable]+0x0): undefined reference to `_kBrotliContextLookupTable'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(decode.c.obj):decode.c:(.rdata$.refptr._kBrotliPrefixCodeRanges[.refptr._kBrotliPrefixCodeRanges]+0x0): undefined reference to `_kBrotliPrefixCodeRanges'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(state.c.obj):state.c:(.text$BrotliDecoderStateInit[BrotliDecoderStateInit]+0x21): undefined reference to `BrotliDefaultAllocFunc'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(state.c.obj):state.c:(.text$BrotliDecoderStateInit[BrotliDecoderStateInit]+0x2f): undefined reference to `BrotliDefaultFreeFunc'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(state.c.obj):state.c:(.text$BrotliDecoderStateInit[BrotliDecoderStateInit]+0x10e): undefined reference to `BrotliSharedDictionaryCreateInstance'
+ x86_64-w64-mingw32-ld: .../curl/brotli/_bld/usr/lib/libbrotlidec.a(state.c.obj):state.c:(.text$BrotliDecoderStateCleanup[BrotliDecoderStateCleanup]+0xf4): undefined reference to `BrotliSharedDictionaryDestroyInstance'
+ collect2: error: ld returned 1 exit status
+ ```
+
+ Breakage reproducible with curl-for-win config "`win-gcc`" and deleting
+ the `LDFLAGS+=' -Wl,--start-group'` line from its `curl.sh` script.
+ (Above line still required for some non-brotli cases, e.g. libssh2 and
+ zlib.)
+
+ Assisted-by: Kai Pastor
+ Ref: https://github.com/curl/curl/pull/10857#discussion_r1611714989
+ Follow-up to 1e3319a167d2f32d295603167486e9e88af9bb4e #10857
+ Closes #13761
+
+Pavel Pavlov (24 May 2024)
+
+- cmake: fix building in unity mode
+
+ - Fix sha256 and sha512 duplicate macro names (eg function-like macro Ch
+ is now Sha256_Ch and Sha512_Ch).
+
+ - Avoid defining short defines like R, S. (eg S is now Sha256_S).
+
+ Closes https://github.com/curl/curl/pull/13751
+
+Jay Satiro (24 May 2024)
+
+- winbuild: remove outdated WIN32 defines
+
+ - Remove all instances in the makefile of compiler option /DWIN32.
+
+ This is a follow-up to e9a7d4a1 which replaced all defined(WIN32) checks
+ with defined(_WIN32) in the codebase, since only the latter is
+ automatically defined by all compilers for Windows builds.
+
+ Bug: https://github.com/curl/curl/pull/13739#issuecomment-2123937859
+ Reported-by: Viktor Szakats
+
+ Closes https://github.com/curl/curl/pull/13742
+
+renovate[bot] (24 May 2024)
+
+- ci: update github/codeql-action digest to 9fdb3e4
+
+ Closes #13726
+
+Pavel Pavlov (23 May 2024)
+
+- asyn-thread: avoid using GetAddrInfoExW with impersonation
+
+ Multiple reports suggest that GetAddrInfoExW fails when impersonation is
+ used. This PR checks if the thread is impersonating and avoids using
+ the GetAddrInfoExW API.
+
+ Reported-by: Keerthi Timmaraju
+ Assisted-by: edmcln on github
+ Fixes #13612
+ Closes #13738
+
+Stefan Eissing (23 May 2024)
+
+- transfer: conn close on paused upload
+
+ - add 2 variations on test_07_42 which PAUSEs uploads
+ and response connections terminating either right away
+ or after the 100-continue response
+ - when detecting the connection being closed in transfer.c
+ readwrite_data(), clear ALL send bits in data->req.keepon.
+ It no longer makes sense to wait for a KEEP_SEND_PAUSE or HOLD.
+ - in the protocol client writer add the check for incomplete
+ response bodies. When an EOS is seen and the length is known,
+ check that and fail if bytes are missing.
+
+ Reported-by: Sergey Bronnikov
+ Fixes #13740
+ Closes #13750
+
+- CI GHA: add vsftpd to ngtcp2-linux runs
+
+ - not using HTTP/3, but gnutls does not seem to run
+ anywhere else right now
+
+ Closes #13760
+
+Orgad Shaneh (23 May 2024)
+
+- GHA: increase timeout for Cygwin autotools build tests step
+
+ Apparently 10 minutes are not (always) enough:
+ https://github.com/curl/curl/actions/runs/9197003907/job/25296439556#step:8:1936
+
+ Closes #13753
+
+Stefan Eissing (22 May 2024)
+
+- mbedtls: send close-notify on close
+
+  - send the TLS close notify message when closing down
+ the mbedtls connection filter
+  - this is a "least effort" version and, as with other TLS filters,
+ is lacking a graceful send/receive/timeout for a really
+ clean shutdown.
+
+ Closes #13745
+
+- mbedtls: check version for cipher id
+
+ mbedtls_ssl_get_ciphersuite_id_from_ssl() seems to have been added in
+ mbedtls 3.2.0. Check for that version.
+
+ Closes #13749
+
+Viktor Szakats (22 May 2024)
+
+- cmake: fix building with both md4 and md5 in unity mode
+
+ Macro and static function names were colliding between
+ `lib/md4.c` and
+ `lib/md5.c`.
+
+ Fix it by namespacing these symbols.
+
+ Seen with a basic macOS build using these options:
+ `-DCMAKE_UNITY_BUILD=ON -DCURL_USE_SECTRANSP=ON`
+
+ Closes #13737
+
+Daniel Stenberg (22 May 2024)
+
+- docs/Makefile.am: make curl-config.1 install
+
+ on "make install" like it should
+
+ Follow-up to 60971d665b9b1df87082
+
+ Closes #13741
+
+dependabot[bot] (22 May 2024)
+
+- GHA: bump actions/checkout from 4.1.4 to 4.1.6
+
+ Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.4 to 4.1.6.
+ - [Release notes](https://github.com/actions/checkout/releases)
+ - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
+  - [Commits](https://github.com/actions/checkout/compare/0ad4b8fadaa221de15dcec353f45205ec38ea70b...a5ac7e51b41094c92402da3b24376905380afc29)
+
+ ---
+ updated-dependencies:
+ - dependency-name: actions/checkout
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+ ...
+
+ Signed-off-by: dependabot[bot] <support@github.com>
+
+ Closes #13720
+
+Stefan Eissing (22 May 2024)
+
+- pytest: add ftp upload tests
+
+ - refs #13556
+ - allow anon uploads on vsftpd test server
+ - add test_30_05 for plain upload of 1k, 100k, 1m
+ - add test_31_05 for SSL upload of 1k, 100k, 1m
+ - verify file size and contents
+
+ Closes #13734
+
+- test: add test1546, chunked not last transfer encoding
+
+ with more than one transfer-encoding, 'chunked' must be the last added
+ to the writer stack (and therefore the first to decode). RFC 9112, ch.
+ 6.1.
+
+ Closes #13736
+
+- test: add test1484, for HEAD with content
+
+ - test HEAD request with 'Transfer-Encoding:chunked' and
+ non-encoded response content
+ - verifies #13725
+
+ Closes #13735
+
+Daniel Stenberg (22 May 2024)
+
+- RELEASE-NOTES: synced
+
+ bump to 8.8.1 for now
+
+Viktor Szakats (22 May 2024)
+
+- (lib)curl.rc: set debug flag also for `CURLDEBUG` and `UNITTESTS`
+
+ These macros also enable debug features in both libcurl and curl.
+ Enable `VS_FF_DEBUG` version resource flag when they are set.
+
+ Closes #13730
+
+Jay Satiro (22 May 2024)
+
+- winbuild: fix PE version info debug flag
+
+ - Only set PE file flag VS_FF_DEBUG if curl.exe and libcurl.dll were
+ built with winbuild option DEBUG=yes which builds with debug info.
+
+ VS_FF_DEBUG is a PE flag (Portable Executable file flag - dll, exe, etc)
+ that indicates the file contains or was built with debug info.
+
+ Prior to this change when winbuild was used to build curl, curl.exe
+ and libcurl.dll always had VS_FF_DEBUG set, regardless of build option
+ DEBUG=yes/no, due to some bad logic.
+
+ Closes https://github.com/curl/curl/pull/13739
+
Version 8.8.0 (22 May 2024)
Daniel Stenberg (22 May 2024)
@@ -5972,4738 +11730,3 @@ Daniel Stenberg (5 Feb 2024)
- THANKS: add Dmitry Tretyakov
... since I missed to give credit to the report in the fix of #12861
-
-Stefan Eissing (5 Feb 2024)
-
-- openssl-quic: check on Windows that socket conv to int is possible
-
- Fixes #12861
- Closes #12865
-
-Daniel Stenberg (5 Feb 2024)
-
-- tool_cb_hdr: only parse etag + content-disposition for 2xx
-
- ... and ignore them for other response codes.
-
- Reported-by: Harry Sintonen
- Closes #12866
-
-- md4: include strdup.h for the memdup proto
-
- Reported-by: Erik Schnetter
- Fixes #12849
- Closes #12863
-
-Joel Depooter (5 Feb 2024)
-
-- docs: add missing slashes to SChannel client certificate documentation
-
-  When setting the CURLOPT_SSLCERT option to a certificate thumbprint, it
-  is required to have a backslash between the "store location", "store
-  name" and "thumbprint" tokens. These backslashes were present in the
- previous documentation, but were missed in the transition to markdown
- documentation.
-
- Closes #12854
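-
-  For reference, a minimal sketch of the documented form when built with
-  Schannel, with the backslashes escaped in the C literal; the URL and the
-  thumbprint value are placeholders:
-  ```
-  #include <curl/curl.h>
-
-  int main(void)
-  {
-    CURL *curl = curl_easy_init();
-    if(curl) {
-      curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-      /* store location \ store name \ thumbprint */
-      curl_easy_setopt(curl, CURLOPT_SSLCERT,
-                       "CurrentUser\\MY\\0123456789abcdef0123456789abcdef01234567");
-      curl_easy_perform(curl);
-      curl_easy_cleanup(curl);
-    }
-    return 0;
-  }
-  ```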
-
-Stefan Eissing (5 Feb 2024)
-
-- HTTP/2: write response directly
-
- - use the new `Curl_xfer_write_resp()` to write incoming responses
- directly to the client
- - eliminates `stream->recvbuf`
- - memory consumption on parallel transfers minimized
-
- Closes #12828
-
-Daniel Stenberg (5 Feb 2024)
-
-- cookie.md: provide an example sending a fixed cookie
-
- Closes #12868
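-
-  A minimal libcurl sketch of sending a fixed cookie (the cookie names and
-  values here are made up):
-  ```
-  #include <curl/curl.h>
-
-  /* attach a fixed cookie header to requests done with this handle; the
-     string uses the usual "name=value; name2=value2" form */
-  static void set_fixed_cookie(CURL *curl)
-  {
-    curl_easy_setopt(curl, CURLOPT_COOKIE, "session=abc123; theme=dark");
-  }
-  ```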
-
-Lars Kellogg-Stedman (5 Feb 2024)
-
-- ALTSVC.md: correct a typo
-
- The ALPN documentation erroneously referred to a "host number" instead
- of a "port number".
-
- Closes #12852
-
-Boris Verkhovskiy (5 Feb 2024)
-
-- proxy1.0.md: fix example
-
- Closes #12856
-
-Chris Webb (5 Feb 2024)
-
-- configure: add --disable-docs flag
-
- Building man pages from curldown sources now requires perl. Add a
- --disable-docs flag to configure to enable building and installing
- without documentation where perl is not available or man pages are not
- required. This is selected automatically (with a warning) when perl is
- not found by configure.
-
- Fixes #12832
- Closes #12857
-
-Faraz Fallahi (5 Feb 2024)
-
-- connect.c: fix typo
-
- Closes #12858
-
-Daniel Stenberg (1 Feb 2024)
-
-- sendf: ignore response body to HEAD
-
- and mark the stream for close, but return OK since the response this far
- was ok - if headers were received. Partly because this is what curl has
- done traditionally.
-
- Test 499 verifies. Updates test 689.
-
- Reported-by: Sergey Bronnikov
- Bug: https://curl.se/mail/lib-2024-02/0000.html
- Closes #12842
-
-- ftp: treat a 226 arriving before data as a signal to read data
-
- For active mode transfers.
-
- Due to some interesting timing, curl can sometimes get the 226 (transfer
- complete) over the control channel first, before the data connection
- signals readability. If this happens, use that as a signal to check the
- data connection.
-
- Additionally, set the socket filter in listen mode *before* the
- PORT/EPRT command is issued, to reduce the risk that the little time gap
- could interfere.
-
- This issue never reproduced for me on Debian and takes several hundred
- rounds for me to trigger on my mac.
-
- Reported-by: Stefan Eissing
- Fixes #12823
- Closes #12841
-
-Patrick Monnerat (1 Feb 2024)
-
-- OS400: avoid using awk in the build scripts
-
- Awk is a PASE program and its use may cause a failure depending on the
- CCSID of the calling script (IBM bug?).
-
- For this reason, revert to an sed-only solution to extract the exported
- symbols from the header files.
-
- Closes #12826
-
-Jan Macku (1 Feb 2024)
-
-- docs: remove `mk-ca-bundle.1` from `man_MANS`
-
- It was accidentally added in https://github.com/curl/curl/pull/12730
-
- Co-authored-by: Lukáš Zaoral <lzaoral@redhat.com>
- Signed-off-by: Jan Macku <jamacku@redhat.com>
-
- Follow-up to eefcc1bda4bccd800f5a56a0fe17a2f44a96e88b
- Closes #12843
-
-Daniel Stenberg (1 Feb 2024)
-
-- RELEASE-NOTES: synced
-
- and bump to 8.6.1 for now
-
-- cmdline-docs/Makefile: avoid using a fixed temp file name
-
-  By appending the pid number, two different runs at the same time will not
- trample over the same file.
-
- Reported-by: Jon Rumsey
- Fixes #12829
- Closes #12839
-
-- asyn-thread: use wakeup_close to close the read descriptor
-
- Reported-by: Dan Fandrich
- Ref: #12834
- Closes #12836
-
-Stefan Eissing (1 Feb 2024)
-
-- ntlm_wb: fix buffer type typo
-
- Closes #12825
-
-Daniel Stenberg (1 Feb 2024)
-
-- tool_operate: do not set CURLOPT_QUICK_EXIT in debug builds
-
- Since it allows (small) memory leaks that interfere with torture tests
- and regular memory-leak checks.
-
- Reported-by: Dan Fandrich
- Fixes #12834
- Closes #12835
-
-Boris Verkhovskiy (31 Jan 2024)
-
-- form-string.md: correct the example
-
- Closes #12822
-
-Version 8.6.0 (31 Jan 2024)
-
-Daniel Stenberg (31 Jan 2024)
-
-- RELEASE-NOTES: synced
-
- curl 8.6.0
-
-- THANKS: new contributors from 8.5.0
-
-Jay Satiro (31 Jan 2024)
-
-- cd2nroff: use perl 'strict' and 'warnings'
-
- - Use strict and warnings pragmas.
-
- - If open() fails then show the reason.
-
- - Set STDIN io layer :crlf so that input is properly read on Windows.
-
- - When STDIN is used as input, the filename $f is now set to "STDIN".
-
- Various error messages in single() use $f for the filename and this way
- it is not undefined when STDIN.
-
- Closes https://github.com/curl/curl/pull/12819
-
-Daniel Stenberg (30 Jan 2024)
-
-- cd2nroff: fix duplicate output issue
-
- Assisted-by: Jay Satiro
- Fixes https://github.com/curl/curl-www/issues/321
- Closes #12818
-
-- lib: error out on multissl + http3
-
- Since the QUIC/h3 code has no knowledge or handling of multissl it might
- bring unintended consequences if we allow it.
-
- configure, cmake and curl_setup.h all now reject this combination.
-
- Assisted-by: Viktor Szakats
- Assisted-by: Gisle Vanem
- Ref: #12806
- Closes #12807
-
-Patrick Monnerat (29 Jan 2024)
-
-- OS400: sync ILE/RPG binding
-
- Also do not force git CRLF line endings on *.cmd files for OS400.
-
- Closes #12815
-
-Viktor Szakats (28 Jan 2024)
-
-- build: delete/replace 3 more clang warning pragmas
-
- - tool_msgs: delete redundant `-Wformat-nonliteral` suppression pragma.
-
- - whitespace formatting in `mprintf.h`, lib518, lib537.
-
- - lib518: fix wrong variable in `sizeof()`.
-
- - lib518: bump variables to `rlim_t`.
- Follow-up to e2b394106d543c4615a60795b7fdce04bd4e5090 #1469
-
- - lib518: sync error message with lib537
- Follow-up to 365322b8bcf9efb6a361473d227b70f2032212ce
-
- - lib518, lib537: replace `-Wformat-nonliteral` suppression pragmas
- by reworking test code.
-
- Follow-up to 5b286c250829e06a135a6ba998e80beb7f43a734 #12812
- Follow-up to aee4ebe59161d0a5281743f96e7738ad97fe1cd4 #12803
- Follow-up to 09230127589eccc7e01c1a7217787ef8e64f3328 #12540
- Follow-up to 3829759bd042c03225ae862062560f568ba1a231 #12489
-
- Reviewed-by: Daniel Stenberg
- Closes #12814
-
-Richard Levitte (27 Jan 2024)
-
-- cmake: freshen up docs/INSTALL.cmake
-
- - Turn docs/INSTALL.cmake into a proper markdown file,
- docs/INSTALL-CMAKE.md
- - Move things around to divide the description into configuration,
- building and installing sections
- - Mention the more modern cmake options to configure, build and install,
- but also retain the older variants as fallbacks
-
- Closes #12772
-
-Viktor Szakats (27 Jan 2024)
-
-- build: delete/replace clang warning pragmas
-
- - delete redundant warning suppressions for `-Wformat-nonliteral`.
- This now relies on `CURL_PRINTF()` and it's theoratically possible
-    This now relies on `CURL_PRINTF()` and it's theoretically possible
- as a corner-case here.
-
- - replace two pragmas with code changes to avoid the warnings.
-
- Follow-up to aee4ebe59161d0a5281743f96e7738ad97fe1cd4 #12803
- Follow-up to 09230127589eccc7e01c1a7217787ef8e64f3328 #12540
- Follow-up to 3829759bd042c03225ae862062560f568ba1a231 #12489
-
- Reviewed-by: Daniel Stenberg
- Closes #12812
-
-Daniel Stenberg (27 Jan 2024)
-
-- RELEASE-NOTES: synced
-
-- http: only act on 101 responses when they are HTTP/1.1
-
- For 101 responses claiming to be any other protocol, bail out. This
- would previously trigger an assert.
-
- Add test 1704 to verify.
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=66184
- Closes #12811
-
-Scarlett McAllister (27 Jan 2024)
-
-- _VARIABLES.md: add missing 'be' into the sentence
-
- Closes #12809
-
-Stefan Eissing (27 Jan 2024)
-
-- mqtt, remove remaining use of data->state.buffer
-
- Closes #12799
-
-Daniel Stenberg (27 Jan 2024)
-
-- x509asn1: switch from malloc to dynbuf
-
- Closes #12808
-
-- x509asn1: make utf8asn1str() use dynbuf instead of malloc + memcpy
-
- Closes #12808
-
-- x509asn1: reduce malloc in Curl_extract_certinfo
-
- Using dynbuf
-
- Closes #12808
-
-Jay Satiro (27 Jan 2024)
-
-- THANKS: add Alexander Bartel and Brennan Kinney
-
- They reported and investigated #10259 which was fixed by 7b2d98df.
-
- Ref: https://github.com/curl/curl/issues/10259
-
-Daniel Stenberg (26 Jan 2024)
-
-- krb5: add prototype to silence clang warnings on mvsnprintf()
-
- "error: format string is not a string literal"
-
- Follow-up to 09230127589eccc7 which made the warning appear
-
- Assisted-by: Viktor Szakats
- Closes #12803
-
-- x509asn1: remove code for WANT_VERIFYHOST
-
- No code ever sets this anymore since we dropped gskit
-
- Follow-up to 78d6232f1f326b9ab4d
-
- Closes #12804
-
-- socks: reduce the buffer size to 600 (from 8K)
-
-  This is malloc'ed memory and it does not need more. Test 742 helps us verify
- this.
-
- Closes #12789
-
-Stefan Eissing (26 Jan 2024)
-
-- file+ftp: use stack buffers instead of data->state.buffer
-
- Closes #12789
-
-- vtls: receive max buffer
-
- - do not only receive one TLS record, but try to fill
- the passed buffer
-  - consider <4K remaining space as "filled".
-
- Closes #12801
-
-Daniel Stenberg (26 Jan 2024)
-
-- docs: do not start lines/sentences with So, But nor And
-
- Closes #12802
-
-- docs: remove spurious ampersands from markdown
-
- They were leftovers from the nroff conversion.
-
- Follow-up to eefcc1bda4bccd800f5a5
-
- Closes #12800
-
-Patrick Monnerat (26 Jan 2024)
-
-- sasl: make login option string override http auth
-
- - Use http authentication mechanisms as a default, not a preset.
-
- Consider http authentication options which are mapped to SASL options as
- a default (overriding the hardcoded default mask for the protocol) that
- is ignored if a login option string is given.
-
- Prior to this change, if some HTTP auth options were given, sasl mapped
- http authentication options to sasl ones but merged them with the login
- options.
-
- That caused problems with the cli tool that sets the http login option
- CURLAUTH_BEARER as a side-effect of --oauth2-bearer, because this flag
-  maps to more than one sasl mechanism and the latter cannot be cleared
- individually by the login options string.
-
- New test 992 checks this.
-
- Fixes https://github.com/curl/curl/issues/10259
- Closes https://github.com/curl/curl/pull/12790
-
-Stefan Eissing (26 Jan 2024)
-
-- socks: use own buffer instead of data->state.buffer
-
- Closes #12788
-
-Daniel Stenberg (26 Jan 2024)
-
-- socks: fix generic output string to say SOCKS instead of SOCKS4
-
- ... since it was also logged for SOCKS5.
-
- Closes #12797
-
-- test742: test SOCKS5 with max length user, password and hostname
-
- Adjusted the socksd server accordingly to allow for configuring that
- long user name and password.
-
- Closes #12797
-
-Stefan Eissing (25 Jan 2024)
-
-- ssh: use stack scratch buffer for seeks
-
- - instead of data->state.buffer
-
- Closes #12794
-
-Daniel Stenberg (25 Jan 2024)
-
-- krb5: access the response buffer correctly
-
- As the pingpong code no longer uses the download buffer.
-
-  Follow-up to c2d973627bab12ab
- Pointed-out-by: Stefan Eissing
- Closes #12796
-
-Stefan Eissing (25 Jan 2024)
-
-- mqtt: use stack scratch buffer for recv+publish
-
- - instead of data->state.buffer
-
- Closes #12792
-
-- telnet, use stack scratch buffer for do
-
- - instead of data->state.buffer
-
- Closes #12793
-
-- http, use stack scratch buffer
-
- - instead of data->state.buffer
-
- Closes #12791
-
-- ntlm_wb: do not use data->state.buf any longer
-
- Closes #12787
-
-- gitignore: the generated `libcurl-symbols.md`
-
- Closes #12795
-
-Daniel Stenberg (25 Jan 2024)
-
-- tool: fix the listhelp generation command
-
- The previous command line to generate the tool_listhelp.c source file
- broke with 2494b8dd5175cee7.
-
- Make 'make listhelp' invoked in src/ generate it. Also update the
- comment in the file to mention the right procedure.
-
- Closes #12786
-
-- http: check for "Host:" case insensitively
-
- When checking if the user wants to replace the header, the check should
- be case insensitive.
-
- Adding test 461 to verify
-
- Found-by: Dan Fandrich
- Ref: #12782
- Closes #12784
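-
-  The behavior being fixed, sketched with the public API (URL and host name
-  are placeholders): a custom Host: header in any letter case should replace
-  the one curl generates:
-  ```
-  #include <curl/curl.h>
-
-  int main(void)
-  {
-    CURL *curl = curl_easy_init();
-    if(curl) {
-      /* a lowercase "host:" must still replace curl's own Host: header */
-      struct curl_slist *hdrs = curl_slist_append(NULL, "host: example.org");
-      curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1/");
-      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
-      curl_easy_perform(curl);
-      curl_slist_free_all(hdrs);
-      curl_easy_cleanup(curl);
-    }
-    return 0;
-  }
-  ```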
-
-Tatsuhiro Tsujikawa (25 Jan 2024)
-
-- configure: add libngtcp2_crypto_boringssl detection
-
- If OpenSSL is found to be BoringSSL or AWS-LC, and ngtcp2 is requested,
- try to detect libngtcp2_crypto_boringssl.
-
-  Reported-by: ウさん
- Fixes #12724
- Closes #12769
-
-Daniel Stenberg (25 Jan 2024)
-
-- http: remove comment reference to a removed solution
-
- Follow-up to 58974d25d
-
- Closes #12785
-
-Stefan Eissing (25 Jan 2024)
-
-- pytest: Scorecard tracking CPU and RSS
-
- Closes #12765
-
-Graham Campbell (25 Jan 2024)
-
-- GHA: bump ngtcp2, gnutls, mod_h2, quiche
-
- - ngtcp2 to v1.2.0
- - gnutls to 3.8.3
- - mod_h2 to 2.0.26
- - quiche to 0.20.0
-
- Closes #12778
- Closes #12779
- Closes #12780
- Closes #12781
-
-Daniel Stenberg (25 Jan 2024)
-
-- ftpserver.pl: send 213 SIZE response without spurious newline
-
-- pingpong: stop using the download buffer
-
- The pingpong logic now uses its own dynbuf for receiving command
- response data.
-
-  When the "final" response header for a command has been received, that
-  final line is left first in the recvbuf for the protocols to parse at
-  will. If there is additional data behind the final response line, the
-  'overflow' counter indicates how many bytes.
-
- Closes #12757
-
-- gen.pl: remove bold from .IP used for ##
-
- Reported-by: Viktor Szakats
- Fixes #12776
- Closes #12777
-
-Viktor Szakats (24 Jan 2024)
-
-- cmake: rework options to enable curl and libcurl docs
-
- Rework CMake options for building/using curl tool and libcurl manuals.
-
- - rename `ENABLE_MANUAL` to `ENABLE_CURL_MANUAL`, meaning:
- to build man page and built-in manual for curl tool.
-
- - rename `BUILD_DOCS` to `BUILD_LIBCURL_DOCS`, meaning:
- to build man pages for libcurl.
-
- - `BUILD_LIBCURL_DOCS` now works without having to enable
- `ENABLE_CURL_MANUAL` too.
-
- - drop support for existing CMake-level `USE_MANUAL` option to avoid
- confusion. (It used to work with the effect of current
- `ENABLE_CURL_MANUAL`, but only by accident.)
-
- Assisted-by: Richard Levitte
- Ref: #12771
- Closes #12773
-
-Daniel Stenberg (24 Jan 2024)
-
-- urlapi: remove assert
-
- This assert triggers wrongly when CURLU_GUESS_SCHEME and
- CURLU_NO_AUTHORITY are both set and the URL is a single path.
-
- I think this assert has played out its role. It was introduced in a
- rather big refactor.
-
- Follow-up to 4cfa5bcc9a
-
- Reported-by: promptfuzz_ on hackerone
- Closes #12775
-
-Patrick Monnerat (24 Jan 2024)
-
-- tests: avoid int/size_t conversion size/sign warnings
-
- Closes #12768
-
-Daniel Stenberg (24 Jan 2024)
-
-- GHA: add a job scanning for "bad words" in markdown
-
- This means words, phrases or things we have decided not to use - words that
- are spelled right according to the dictionary but we want to avoid. In the
- name of consistency and better documentation.
-
- Closes #12764
-
-Viktor Szakats (23 Jan 2024)
-
-- cmake: speed up curldown processing, enable by default
-
- - cmake: enable `BUILD_DOCS` by default (this controls converting and
- installing `.3` files from `.md` sources)
-
- - cmake: speed up generating `.3` files by using a single command per
- directory, instead of a single command per file. This reduces external
- commands by about a thousand. (There remains some CMake logic kicking
- in resulting in 500 -one per file- external `-E touch_nocreate` calls.)
-
- - cd2nroff: add ability to process multiple input files.
-
- - cd2nroff: add `-k` option to use the source filename to form the
- output filename. (instead of the default in-file `Title:` line.)
-
- Follow-up to 3f08d80b2244524646ce86915c585509ac54fb4c
- Follow-up to ea0b575dab86a3c44dd1d547dc500276266aa382 #12753
- Follow-up to eefcc1bda4bccd800f5a56a0fe17a2f44a96e88b #12730
-
- Closes #12762
-
-Richard Levitte (23 Jan 2024)
-
-- docs: install curl.1 with cmake as well
-
- Closes #12759
-
-Daniel Stenberg (23 Jan 2024)
-
-- osslq: remove the TLS library from the version output
-
- Since we only support using a single TLS library at any one time, we
- know that the TLS library for QUIC is the same that is also shown for
- regular TLS.
-
- Fixes #12763
- Reported-by: Viktor Szakats
- Closes #12767
-
-Stefan Eissing (23 Jan 2024)
-
-- CI: remove unnecessary OpenSSL 3 option `enable-tls1_3`
-
- .. and switch OpenSSL 3 libdir from lib64 to lib for consistency.
-
- Closes https://github.com/curl/curl/pull/12758
-
-- GHA: bump nghttp2 version to v1.59.0
-
- - Switch to v1.59.0 for GHA CI jobs that use a specific nghttp2-version.
-
- Closes https://github.com/curl/curl/pull/12766
-
-Daniel Stenberg (23 Jan 2024)
-
-- RELEASE-NOTES: synced
-
-- docs/cmdline: change to .md for cmdline docs
-
-  - switch all individual files documenting command line options into .md,
- as the documentation is now markdown-looking.
-
- - made the parser treat 4-space indents as quotes
-
- - switch to building the curl.1 manpage using the "mainpage.idx" file,
- which lists the files to include to generate it, instead of using the
- previous page-footer/headers. Also, those files are now also .md
- ones, using the same format. I gave them underscore prefixes to make
- them sort separately:
- _NAME.md, _SYNOPSIS.md, _DESCRIPTION.md, _URL.md, _GLOBBING.md,
- _VARIABLES.md, _OUTPUT.md, _PROTOCOLS.md, _PROGRESS.md, _VERSION.md,
- _OPTIONS.md, _FILES.md, _ENVIRONMENT.md, _PROXYPREFIX.md,
- _EXITCODES.md, _BUGS.md, _AUTHORS.md, _WWW.md, _SEEALSO.md
-
- - updated test cases accordingly
-
- Closes #12751
-
-dependabot[bot] (23 Jan 2024)
-
-- CI: bump actions/cache from 3 to 4
-
- Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4.
- - [Release notes](https://github.com/actions/cache/releases)
- - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- - [Commits](https://github.com/actions/cache/compare/v3...v4)
-
- ---
- updated-dependencies:
- - dependency-name: actions/cache
- dependency-type: direct:production
- update-type: version-update:semver-major
- ...
-
- Signed-off-by: dependabot[bot] <support@github.com>
- Closes #12756
-
-Daniel Stenberg (23 Jan 2024)
-
-- openssl: when verifystatus fails, remove session id from cache
-
- To prevent that it gets used in a subsequent transfer that skips the
- verifystatus check since that check can't be done when the session id is
- reused.
-
- Reported-by: Hiroki Kurosawa
- Closes #12760
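-
-  For context, a minimal sketch of the option whose check is involved here:
-  ```
-  #include <curl/curl.h>
-
-  /* request OCSP stapling verification; if the status cannot be verified,
-     the handshake fails instead of silently proceeding */
-  static void require_cert_status(CURL *curl)
-  {
-    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYSTATUS, 1L);
-  }
-  ```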
-
-Viktor Szakats (23 Jan 2024)
-
-- cmake: add option to disable building docs
-
-Richard Levitte (23 Jan 2024)
-
-- cmake: use curldown to build man pages
-
- This throws away the previous HTML and PDF producers, to mimic what
- Makefile.am does as faithfully as possible.
-
- Closes #12753
-
-Daniel Stenberg (23 Jan 2024)
-
-- mksymbolsmanpage.pl: provide references to where the symbol is used
-
-- docs: introduce "curldown" for libcurl man page format
-
- curldown is this new file format for libcurl man pages. It is markdown
- inspired with differences:
-
- - Each file has a set of leading headers with meta-data
- - Supports a small subset of markdown
- - Uses .md file extensions for editors/IDE/GitHub to treat them nicely
- - Generates man pages very similar to the previous ones
- - Generates man pages that still convert nicely to HTML on the website
- - Detects and highlights mentions of curl symbols automatically (when
- their man page section is specified)
-
- tools:
-
- - cd2nroff: converts from curldown to nroff man page
- - nroff2cd: convert an (old) nroff man page to curldown
- - cdall: convert many nroff pages to curldown versions
- - cd2cd: verifies and updates a curldown to latest curldown
-
- This setup generates .3 versions of all the curldown versions at build time.
-
- CI:
-
- Since the documentation is now technically markdown in the eyes of many
- things, the CI runs many more tests and checks on this documentation,
- including proselint, link checkers and tests that make sure we capitalize the
- first letter after a period...
-
- Closes #12730
-
-Viktor Szakats (22 Jan 2024)
-
-- libssh2: use `libssh2_session_callback_set2()` with v1.11.1
-
- To avoid a local hack to pass function pointers and to avoid
- deprecation warnings when building with libssh2 v1.11.1 or newer:
- ```
-  lib/vssh/libssh2.c:3324:5: warning: 'libssh2_session_callback_set' is deprecated: since libssh2 1.11.1. Use libssh2_session_callback_set2() [-Wdeprecated-declarations]
-  lib/vssh/libssh2.c:3326:5: warning: 'libssh2_session_callback_set' is deprecated: since libssh2 1.11.1. Use libssh2_session_callback_set2() [-Wdeprecated-declarations]
- ```
-  Ref: https://github.com/curl/curl-for-win/actions/runs/7609484879/job/20720821100#step:3:4982
-
- Ref: https://github.com/libssh2/libssh2/pull/1285
-  Ref: https://github.com/libssh2/libssh2/commit/c0f69548be902147ce014ffa40b8db3cf1d4b0b4
- Reviewed-by: Daniel Stenberg
- Closes #12754
-
-Daniel Stenberg (22 Jan 2024)
-
-- transfer: make the select_bits_paused condition check both directions
-
- If there is activity in a direction that is not paused, return false.
-
- Reported-by: Sergey Bronnikov
- Bug: https://curl.se/mail/lib-2024-01/0049.html
- Closes #12740
-
-Stefan Eissing (22 Jan 2024)
-
-- http3: initial support for OpenSSL 3.2 QUIC stack
-
- - HTTP/3 for curl using OpenSSL's own QUIC stack together
- with nghttp3
- - configure with `--with-openssl-quic` to enable curl to
- build this. This requires the nghttp3 library
- - implementation with the following restrictions:
- * macOS has to use an unconnected UDP socket due to an
- issue in OpenSSL's datagram implementation
- See https://github.com/openssl/openssl/issues/23251
-      This makes connections to non-responsive servers hang.
- * GET requests will send the indicator that they have
- no body in a separate QUIC packet. This may result
- in processing delays or Transfer-Encodings on proxied
- requests
- * uploads that encounter blocks will use 100% cpu as
-      detection of these flow control issues is not working
-      (we have not figured out how to pry that from OpenSSL).
-
- Closes #12734
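-
-  On the application side, requesting HTTP/3 is the same regardless of which
-  QUIC backend libcurl was built with; a minimal sketch:
-  ```
-  #include <curl/curl.h>
-
-  /* prefer HTTP/3 but allow fallback to older versions;
-     CURL_HTTP_VERSION_3ONLY would instead fail when QUIC cannot be used */
-  static void ask_for_h3(CURL *curl)
-  {
-    curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, (long)CURL_HTTP_VERSION_3);
-  }
-  ```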
-
-Viktor Szakats (22 Jan 2024)
-
-- cmake: fix `ENABLE_MANUAL` option
-
- Fix the `ENABLE_MANUAL` option. Set it to default to `OFF`.
-
- Before this patch `ENABLE_MANUAL=ON` was a no-op, even though it was the
- option designed to enable building and using the built-in curl manual.
- (`USE_MANUAL=ON` option worked for this instead, by accident).
-
- Ref: https://github.com/curl/curl/pull/12730#issuecomment-1902572409
- Closes #12749
-
-Mohammadreza Hendiani (19 Jan 2024)
-
-- TODO: update broken link to ratelimit-headers draft
-
- Closes #12741
-
-Daniel Stenberg (19 Jan 2024)
-
-- cmake: when USE_MANUAL=YES, build the curl.1 man page
-
- Fixes KNOWN_BUG 15.4
-
- Closes #12742
-
-- cmdline-opts/write-out.d: remove spurious double quotes
-
-Stefan Eissing (19 Jan 2024)
-
-- rtsp: Convert assertion into debug log
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=65934
-
- - write excess bytes to the client where the standard excess bytes
- checks will report any wrongness and fail the transfer
-
- Fixes #12738
- Closes #12739
-
-Daniel Stenberg (19 Jan 2024)
-
-- headers: remove assert from Curl_headers_push
-
- The fuzzer managed to reach the function without a terminating CR or LF
- so let's handle it normally. While there, remove the goto.
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=65839
-
- Closes #12721
-
-- curl_easy_getinfo.3: remove the wrong time value count
-
- It said "six" time values but they are eight by now. Remove the mention
- of the amount.
-
- Closes #12727
-
-Viktor Szakats (18 Jan 2024)
-
-- mbedtls: fix `-Wnull-dereference` and `-Wredundant-decls`
-
- - Silence warning in mbedTLS v3.5.1 public headers:
- ```
-    ./mbedtls/_x64-linux-musl/usr/include/psa/crypto_extra.h:489:14: warning: redundant redeclaration of 'psa_set_key_domain_parameters' [-Wredundant-decls]
-    ./mbedtls/_x64-linux-musl/usr/include/psa/crypto_struct.h:354:14: note: previous declaration of 'psa_set_key_domain_parameters' was here
-    ```
-    Ref: https://github.com/libssh2/libssh2/commit/ecec68a2c13a9c63fe8c2dc457ae785a513e157c
-    Ref: https://github.com/libssh2/libssh2/pull/1226
- Ref: https://github.com/libssh2/libssh2/pull/1226
-
- - Fix compiler warnings seen with gcc 9.2.0 + cmake unity:
- ```
- ./curl/lib/vtls/mbedtls.c: In function 'mbedtls_bio_cf_read':
-    ./curl/lib/vtls/mbedtls.c:189:11: warning: null pointer dereference [-Wnull-dereference]
-    189 | nread = Curl_conn_cf_recv(cf->next, data, (char *)buf, blen, &result);
-    | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    ./curl/lib/vtls/mbedtls.c: In function 'mbedtls_bio_cf_write':
-    ./curl/lib/vtls/mbedtls.c:168:14: warning: null pointer dereference [-Wnull-dereference]
-    168 | nwritten = Curl_conn_cf_send(cf->next, data, (char *)buf, blen, &result);
-    | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ```
-
- - delete stray `#else`.
-
- Closes #12720
-
-Daniel Stenberg (17 Jan 2024)
-
-- docs: cleanup nroff format use
-
- - remove use of .BI for code snippet
- - stop using .br, just do a blank line
- - remove use of .PP
- - remove use for .sp
- - remove backslash in .IP
- - use .IP instead of .TP
-
- Closes #12731
-
-Stefan Eissing (17 Jan 2024)
-
-- test2307: fix expected failure code after ws refactoring
-
- Fixes #12722
- Closes #12728
-
-Jay Satiro (17 Jan 2024)
-
-- cf-socket: show errno in tcpkeepalive error messages
-
- - If the socket keepalive options (TCP_KEEPIDLE, etc) cannot be set
- then show the errno in the verbose error messages.
-
- Ref: https://github.com/curl/curl/discussions/12715#discussioncomment-8151652
-
- Closes https://github.com/curl/curl/pull/12726
-
-- tool_getparam: stop supporting `@filename` style for --cookie
-
- The `@filename` style was never documented for --cookie <data|filename>
- but prior to this change curl would accept it anyway and always treat a
- @ prefixed string as a filename.
-
- That's a problem if the string also contains a = sign because then it is
- documented to be interpreted as a cookie string and not a filename.
-
- Example:
-
- `--cookie @foo=bar`
-
- Before: Interpreted as load cookies from filename foo=bar.
-
- After: Interpreted as cookie `@foo=bar` (name `@foo` and value `bar`).
-
- Other curl options with a data/filename option-value use the `@filename`
- to distinguish filenames which is probably how this happened. The
- --cookie option has never been documented that way.
-
- Ref: https://curl.se/docs/manpage.html#-b
-
- Closes https://github.com/curl/curl/pull/12645
-
-Stefan Eissing (16 Jan 2024)
-
-- websockets: refactor decode chain
-
- - use client writer stack for decoding frames
- - move websocket protocol handler to ws.c
-
- Closes #12713
-
-- websockets: check for negative payload lengths
-
- - in en- and decoding, check the websocket frame payload lengths for
- negative values (from curl_off_t) and error the operation in that case
- - add test 2307 to verify
-
- Closes #12707
-
-Daniel Stenberg (16 Jan 2024)
-
-- docs: mention env vars not used by schannel
-
- Ref: #12704
-
- Co-authored-by: Jay Satiro <raysatiro@yahoo.com>
-
- Closes #12711
-
-- tool_operate: make --remove-on-error only remove "real" files
-
- Reported-by: Harry Sintonen
- Assisted-by: Dan Fandrich
-
- Closes #12710
-
-Jay Wu (16 Jan 2024)
-
-- url: don't set default CA paths for Secure Transport backend
-
- As the default for this backend is the native CA store.
-
- Closes #12704
-
-Lin Sun (16 Jan 2024)
-
-- asyn-ares: with modern c-ares, use its default timeout
-
- Closes #12703
-
-Daniel Stenberg (15 Jan 2024)
-
-- tool_operate: stop setting the file comment on Amiga
-
- - the URL is capped at 80 cols, which ruins it if longer
- - it does not strip off URL credentials
-  - it is done unconditionally, not on --xattr
- - we don't have Amiga in the CI which makes fixing it blindly fragile
-
- Someone who builds and tests on Amiga can add it back correctly in a
- future if there is a desire.
-
- Reported-by: Harry Sintonen
- Closes #12709
-
-Stefan Eissing (15 Jan 2024)
-
-- rtsp: deal with borked server responses
-
- - enforce a response body length of 0, if the
-    response has no Content-Length. This is according
- to the RTSP spec.
- - excess bytes in a response body are forwarded to
- the client writers which will report and fail the
- transfer
-
- Follow-up to d7b6ce6
- Fixes #12701
- Closes #12706
-
-Daniel Stenberg (14 Jan 2024)
-
-- version: show only the libpsl version, not its dependencies
-
- The libpsl version output otherwise also includes version number for its
- dependencies, like IDN lib, but since libcurl does not use libpsl's IDN
- functionality those components are not important.
-
- Ref: https://github.com/curl/curl-for-win/issues/63
- Closes #12700
-
-Brad Harder (14 Jan 2024)
-
-- curl.h: CURLOPT_DNS_SERVERS is only available with c-ares
-
- Closes #12695
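-
-  A short sketch of probing for the option at runtime (the server addresses
-  are placeholders); builds using another resolver backend reject it at
-  setopt time:
-  ```
-  #include <stdio.h>
-  #include <curl/curl.h>
-
-  int main(void)
-  {
-    CURL *curl = curl_easy_init();
-    if(curl) {
-      CURLcode rc = curl_easy_setopt(curl, CURLOPT_DNS_SERVERS,
-                                     "192.168.1.100,192.168.1.101:53");
-      if(rc != CURLE_OK)
-        fprintf(stderr, "CURLOPT_DNS_SERVERS rejected: %s\n",
-                curl_easy_strerror(rc));
-      curl_easy_cleanup(curl);
-    }
-    return 0;
-  }
-  ```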
-
-Daniel Stenberg (14 Jan 2024)
-
-- cmdline-opts/gen.pl: error on initial blank line
-
- After the "---" separator, there should be no blank line and this script
- now errors out if one is detected.
-
- Ref: #12696
- Closes #12698
-
-- cf-h1-proxy: no CURLOPT_USERAGENT in CONNECT with hyper
-
- Follow-up to 693cd1679361828a which was incomplete
-
- Ref #12680
- Closes #12697
-
-- curl_multi_fdset.3: remove mention of null pointer support
-
-  ... since this function has not supported null pointer fd_set arguments since
- at least 2006. (That's when I stopped my git blame journey)
-
- Fixes #12691
- Reported-by: sfan5 on github
- Closes #12692
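-
-  A sketch of the documented usage, assuming POSIX select(): all three fd_set
-  pointers must be real, zeroed sets rather than NULL:
-  ```
-  #include <sys/select.h>
-  #include <curl/curl.h>
-
-  static void wait_for_activity(CURLM *multi)
-  {
-    fd_set rd, wr, ex;
-    int maxfd = -1;
-    struct timeval tv = {1, 0};   /* cap the wait at one second */
-
-    FD_ZERO(&rd);
-    FD_ZERO(&wr);
-    FD_ZERO(&ex);
-    curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
-    if(maxfd >= 0)
-      select(maxfd + 1, &rd, &wr, &ex, &tv);
-    /* with maxfd == -1 there is nothing to wait on yet: sleep briefly */
-  }
-  ```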
-
-Mark Huang (14 Jan 2024)
-
-- docs/cmdline: remove unnecessary line breaks
-
- Closes #12696
-
-Daniel Stenberg (14 Jan 2024)
-
-- transfer: remove warning: Value stored to 'blen' is never read
-
- Detected by scan-build
-
- Follow-up from 1cd2f0072f
-
- Closes #12693
-
-Stefan Eissing (13 Jan 2024)
-
-- lib: replace readwrite with write_resp
-
- This clarifies the handling of server responses by folding the code for
- the complicated protocols into their protocol handlers. This concerns
- mainly HTTP and its bastard sibling RTSP.
-
- The terms "read" and "write" are often used without clear context if
- they refer to the connect or the client/application side of a
- transfer. This PR uses "read/write" for operations on the client side
- and "send/receive" for the connection, e.g. server side. If this is
- considered useful, we can revisit renaming of further methods in another
- PR.
-
- Curl's protocol handler `readwrite()` method been changed:
-
- ```diff
- - CURLcode (*readwrite)(struct Curl_easy *data, struct connectdata *conn,
- - const char *buf, size_t blen,
- - size_t *pconsumed, bool *readmore);
-  + CURLcode (*write_resp)(struct Curl_easy *data, const char *buf, size_t blen,
- + bool is_eos, bool *done);
- ```
-
-  The name was changed to clarify that this writes response data to the
- client side. The parameter changes are:
-
- * `conn` removed as it always operates on `data->conn`
- * `pconsumed` removed as the method needs to handle all data on success
- * `readmore` removed as no longer necessary
- * `is_eos` as indicator that this is the last call for the transfer
- response (end-of-stream).
- * `done` TRUE on return iff the transfer response is to be treated as
- finished
-
- This change affects many files only because of updated comments in
- handlers that provide no implementation. The real change is that the
- HTTP protocol handlers now provide an implementation.
-
- The HTTP protocol handlers `write_resp()` implementation will get passed
- **all** raw data of a server response for the transfer. The HTTP/1.x
- formatted status and headers, as well as the undecoded response
- body. `Curl_http_write_resp_hds()` is used internally to parse the
- response headers and pass them on. This method is public as the RTSP
- protocol handler also uses it.
-
- HTTP/1.1 "chunked" transport encoding is now part of the general
- *content encoding* writer stack, just like other encodings. A new flag
- `CLIENTWRITE_EOS` was added for the last client write. This allows
- writers to verify that they are in a valid end state. The chunked
- decoder will check if it indeed has seen the last chunk.
-
- The general response handling in `transfer.c:466` happens in function
- `readwrite_data()`. This mainly operates now like:
-
- ```
- static CURLcode readwrite_data(data, ...)
- {
- do {
- Curl_xfer_recv_resp(data, buf)
- ...
- Curl_xfer_write_resp(data, buf)
- ...
- } while(interested);
- ...
- }
- ```
-
- All the response data handling is implemented in
- `Curl_xfer_write_resp()`. It calls the protocol handler's `write_resp()`
- implementation if available, or does the default behaviour.
-
- All raw response data needs to pass through this function. Which also
- means that anyone in possession of such data may call
- `Curl_xfer_write_resp()`.
-
- Closes #12480
-
-Daniel Stenberg (13 Jan 2024)
-
-- RELEASE-NOTES: synced
-
-- TODO: TFTP doesn't convert LF to CRLF for mode=netascii
-
- Closes #12655
- Closes #12690
-
-- gen: do italics/bold for a range of letters, not just single word
-
- Previously it would match only on a sequence of non-space, which made it
-  fail to highlight for example "public suffix list".
-
- Updated the recent cookie.d edit from 5da57193b732 to use bold instead
- of italics.
-
- Closes #12689
-
-- docs: describe and highlight super cookies
-
- Reported-by: Yadhu Krishna M
-
- Closes #12687
-
-- configure: when enabling QUIC, check that TLS supports QUIC
-
- Most importantly perhaps is when using OpenSSL that the used
- build/flavor has the QUIC API: the vanilla OpenSSL does not, only
- BoringSSL, libressl, AWS-LC and quictls do.
-
-  Ref: https://github.com/curl/curl/commit/5d044ad9480a9f556f4b6a252d7533b1ba7fe57e#r136780413
-
- Closes #12683
-
-Stefan Eissing (11 Jan 2024)
-
-- vquic: extract TLS setup into own source
-
- - separate ngtcp2 specific parts out
- - provide callback during init to allow ngtcp2 to apply its defaults
-
- Closes #12678
-
-Sergey Markelov (11 Jan 2024)
-
-- multi: remove total timer reset in file_do() while fetching file://
-
- The total timer is properly reset in MSTATE_INIT. MSTATE_CONNECT starts
- with resetting the timer that is a start point for further multi states.
- If file://, MSTATE_DO calls file_do() that should not reset the total
- timer. Otherwise, the total time is always less than the pre-transfer
- and the start transfer times.
-
- Closes #12682
-
-Daniel Stenberg (11 Jan 2024)
-
-- http_proxy: a blank CURLOPT_USERAGENT should not be used in CONNECT
-
- Extended test 80 to verify this.
-
- Reported-by: Stefan Eissing
- Fixes #12680
- Closes #12681
-
-- sectransp: do verify_cert without memdup for blobs
-
- Since the information is then already stored in memory, this can avoid
- an extra set of malloc + free calls.
-
- Closes #12679
-
-- hsts: remove assert for zero length domain
-
- A zero length domain can happen if the HSTS parser is given invalid
- input data which is not unheard of and is done by the fuzzer.
-
- Follow-up from cfe7902111ae547873
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=65661
-
- Closes #12676
-
-- headers: make sure the trailing newline is not stored
-
- extended test1940 to verify blank header fields too
-
- Bug: https://curl.se/mail/lib-2024-01/0019.html
- Reported-by: Dmitry Karpov
- Closes #12675
-
-- curl_easy_header.3: tiny language fix
-
- Closes #12672
-
-- examples/range.c: add
-
- Closes #12671
-
-- examples/netrc.c: add
-
- Closes #12671
-
-- examples/ipv6.c: new example showing IPv6-only internet transfer
-
- Closes #12671
-
-- examples/address-scope.c: renamed from ipv6.c
-
- It actually shows address scope use
-
- Closes #12671
-
-Stefan Eissing (9 Jan 2024)
-
-- multi: pollset adjust, init with FIRSTSOCKET during connect
-
- - `conn->sockfd` is set by `Curl_setup_transfer()`, but that
- is called *after* the connection has been established
- - use `conn->sock[FIRSTSOCKET]` instead
-
- Follow-up to a0f94800d507de
- Closes #12664
-
-Daniel Stenberg (9 Jan 2024)
-
-- WEBSOCKET.md: remove dead link
-
-- CI: spellcheck/appveyor: invoke configure --without-libpsl
-
- Follow-up to 2998874bb61ac6
-
-- cmdline/docs/*.d: switch to using ## instead of .IP
-
- To make the editing easier, both to write and to read.
-
- Closes #12667
-
-- gen.pl: support ## for doing .IP in table-like lists
-
- Warn on use of .RS/.IP/.RE
-
- Closes #12667
-
-Jay Satiro (9 Jan 2024)
-
-- cookie.d: Document use of empty string to enable cookie engine
-
- - Explain that --cookie "" can be used to enable the cookie engine
- without reading any initial cookies.
-
- As is documented in CURLOPT_COOKIEFILE.
-
- Ref: https://curl.se/libcurl/c/CURLOPT_COOKIEFILE.html
-
- Bug: https://github.com/curl/curl/issues/12643#issuecomment-1879844420
- Reported-by: janko-js@users.noreply.github.com
-
- Closes https://github.com/curl/curl/pull/12646
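-
- A minimal libcurl sketch of the behaviour described above; the URL is just
- a placeholder:
-
- ```
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     /* an empty string enables the cookie engine without reading
-        any initial cookies from a file */
-     curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "");
-     curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```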
-
-Daniel Stenberg (9 Jan 2024)
-
-- setopt: use memdup0 when cloning COPYPOSTFIELDS
-
- Closes #12651
-
-- telnet: use dynbuf instead of malloc for escape buffer
-
- Previously, send_telnet_data() would malloc + free a buffer every time
- for escaping IAC codes. Now, it reuses a dynbuf for this purpose.
-
- Closes #12652
-
-- CI: install libpsl or configure --without-libpsl in builds
-
- As a follow-up to the stricter libpsl check in configure
-
-- configure: make libpsl detection failure cause error
-
- To force users to explicitly disable it if they really do not want it
- used, and to make it harder to accidentally miss it.
-
- --without-libpsl is the option to use if PSL is not wanted.
-
- Closes #12661
-
-- RELEASE-NOTES: synced
-
-- pop3: replace calloc + memcpy with memdup0
-
- ... and make sure to return error on out of memory.
-
- Closes #12650
-
-- lib: add debug log outputs for CURLE_BAD_FUNCTION_ARGUMENT
-
- Closes #12658
-
-- mime: use memdup0 instead of malloc + memcpy
-
- Closes #12649
-
-- tool_getparam: move the --rate logic into set_rate()
-
-- tool_getparam: switch to an enum for every option
-
- To make the big switch much easier to read/understand and to make it
- easier to add new options.
-
-- tool_getparam: build post data using dynbuf (more)
-
-- tool_getparam: replace malloc + copy by dynbuf for --data
-
-- tool_getparam: make data_urlencode avoid direct malloc
-
- use aprintf() instead
-
-- tool_getparam: move the --url-query logic into url_query()
-
- This function is not doing post at all so it was always weirdly placed.
-
-- tool_getparam: move the --data logic into set_data()
-
-- tool_getparam: unify the cmdline switch() into a single one
-
- - easier to follow, easier to modify, easier to extend, possibly slightly
- faster
-
- - each case now has the long option as a comment
-
-- tool_getparam: bsearch cmdline options
-
- - the option names are now alpha sorted and lookup is a lot faster
-
- - use case-sensitive matching. It was previously case-insensitive, but that
- was neither documented nor tested.
-
- - remove "partial match" feature. It was not documented, not tested and
- was always fragile as existing use could break when we add a new
- option
-
- - lookup short options via a table
-
- Closes #12631
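-
- Not the tool's actual table or names, just a generic sketch of the kind of
- case-sensitive bsearch() lookup over an alphabetically sorted option table
- described above:
-
- ```
- #include <stdlib.h>
- #include <string.h>
-
- struct optdef {
-   const char *name; /* long option name without the leading "--" */
-   int id;
- };
-
- /* must stay alphabetically sorted for bsearch() to work */
- static const struct optdef opts[] = {
-   {"data", 1}, {"header", 2}, {"url", 3}
- };
-
- static int cmpopt(const void *a, const void *b)
- {
-   return strcmp(((const struct optdef *)a)->name,
-                 ((const struct optdef *)b)->name);
- }
-
- static const struct optdef *findopt(const char *name)
- {
-   struct optdef key = { name, 0 };
-   return bsearch(&key, opts, sizeof(opts)/sizeof(opts[0]),
-                  sizeof(opts[0]), cmpopt);
- }
- ```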
-
-Gabe (8 Jan 2024)
-
-- COPYING: update copyright year
-
- Closes #12654
-
-Stefan Eissing (8 Jan 2024)
-
-- url: init conn->sockfd and writesockfd to CURL_SOCKET_BAD
-
- Also add more tracing to test 19
-
- Follow-up to a0f9480
-
- Fixes #12657
- Closes #12659
-
-Daniel Stenberg (8 Jan 2024)
-
-- connect: remove margin from eyeballer alloc
-
- Presumably leftovers from debugging
-
- Closes #12647
-
-- ftp: only consider entry path if it has a length
-
- Follow-up from 8edcfedc1a144f438bd1cdf814a0016cb
-
- Bug: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=65631
-
- Avoids a NULL pointer deref.
-
- Closes #12648
-
-Stefan Eissing (7 Jan 2024)
-
-- transfer: adjust_pollset improvements
-
- - let `multi_getsock()` initialize the pollset with what the
- transfer state requires with regard to SEND/RECV
- - change connection filters `adjust_pollset()` implementation
- to react on the presence of POLLIN/-OUT in the pollset and
- no longer check CURL_WANT_SEND/CURL_WANT_RECV
- - cf-socket will no longer add POLLIN on its own
- - http2 and http/3 filters will only do adjustments if the
- passed pollset wants to POLLIN/OUT for the transfer on
- the socket. This is similar to the HTTP/2 proxy filter
- and works in stacked filters.
-
- Closes #12640
-
-Daniel Stenberg (6 Jan 2024)
-
-- ftp: use memdup0 to store the OS from a SYST 215 response
-
- avoid malloc + direct buffer fiddle
-
- Closes #12639
-
-- ftp: use dynbuf to store entrypath
-
- avoid direct malloc
-
- Closes #12638
-
-Lealem Amedie (6 Jan 2024)
-
-- wolfssl: load certificate *chain* for PEM client certs
-
- Closes #12634
-
-Stefan Eissing (4 Jan 2024)
-
-- http: adjust_pollset fix
-
- do not add a socket for POLLIN when the transfer does not want to send
- (for example is paused).
-
- Follow-up to 47f5b1a
-
- Reported-by: bubbleguuum on github
- Fixes #12632
- Closes #12633
-
-Daniel Stenberg (3 Jan 2024)
-
-- tool: make parser reject blank arguments if not supported
-
- Already in the getstr() function that clones the input argument.
-
- Closes #12620
-
-dependabot[bot] (3 Jan 2024)
-
-- build(deps): bump github/codeql-action from 2 to 3
-
- Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2
- to 3.
- - [Release notes](https://github.com/github/codeql-action/releases)
- - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- - [Commits](https://github.com/github/codeql-action/compare/v2...v3)
-
- ---
- updated-dependencies:
- - dependency-name: github/codeql-action
- dependency-type: direct:production
- update-type: version-update:semver-major
- ...
-
- Signed-off-by: dependabot[bot] <support@github.com>
-
- Closes #12625
-
-- build(deps): bump actions/checkout from 3 to 4
-
- Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- - [Release notes](https://github.com/actions/checkout/releases)
- - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- - [Commits](https://github.com/actions/checkout/compare/v3...v4)
-
- ---
- updated-dependencies:
- - dependency-name: actions/checkout
- dependency-type: direct:production
- update-type: version-update:semver-major
- ...
-
- Signed-off-by: dependabot[bot] <support@github.com>
-
- Closes #12624
-
-- build(deps): bump actions/upload-artifact from 3 to 4
-
- Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact)
- from 3 to 4.
- - [Release notes](https://github.com/actions/upload-artifact/releases)
- - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4)
-
- ---
- updated-dependencies:
- - dependency-name: actions/upload-artifact
- dependency-type: direct:production
- update-type: version-update:semver-major
- ...
-
- Signed-off-by: dependabot[bot] <support@github.com>
-
- Closes #12627
-
-- build(deps): bump actions/download-artifact from 3 to 4
-
- Bumps [actions/download-artifact](https://github.com/actions/download-artifact)
- from 3 to 4.
- - [Release notes](https://github.com/actions/download-artifact/releases)
- - [Commits](https://github.com/actions/download-artifact/compare/v3...v4)
-
- ---
- updated-dependencies:
- - dependency-name: actions/download-artifact
- dependency-type: direct:production
- update-type: version-update:semver-major
- ...
-
- Signed-off-by: dependabot[bot] <support@github.com>
-
- Closes #12626
-
-Stefan Eissing (3 Jan 2024)
-
-- http3/quiche: fix result code on a stream reset
-
- - fixes pytest failures in test 07_22
- - aligns CURLcode values on stream reset with ngtcp2
-
- Closes #12629
-
-Daniel Stenberg (2 Jan 2024)
-
-- setopt: clear mimepost when formp is freed
-
- A precaution to avoid a possibly dangling pointer left behind.
-
- Reported-by: Thomas Ferguson
- Fixes #12608
- Closes #12621
-
-Andy Alt (2 Jan 2024)
-
-- CI: Add dependabot.yml
-
- This will cause dependabot to open a PR when various actions are
- updated, provided that the action maintainer has issued a release.
-
- Closes #12623
-
-Gisle Vanem (2 Jan 2024)
-
-- content_encoding: change return code to typedef'ed enum
-
- ... to work around a clang ubsan warning.
-
- Fixes #12618
- Closes #12622
-
-Daniel Stenberg (2 Jan 2024)
-
-- tool: prepend output_dir in header callback
-
- When Content-Disposition parsing is used and an output dir is prepended,
- make sure to store that new file name correctly so that it can be used
- for setting the file timestamp when --remote-time is used.
-
- Extended test 3012 to verify.
-
- Co-Authored-by: Jay Satiro
- Reported-by: hgdagon on github
- Fixes #12614
- Closes #12617
-
-- test1254: fix typo in name plus shorten it
-
-- RELEASE-NOTES: synced
-
-Viktor Szakats (2 Jan 2024)
-
-- schannel: fix `-Warith-conversion` gcc 13 warning
-
- ```
- lib/vtls/schannel.c:1201:22: warning: conversion to 'unsigned int' from 'int'
- may change the sign of the result [-Warith-conversion]
- 1201 | *extension_len = *list_len +
- | ^
- ```
-
- Closes #12616
-
-- asyn-thread: silence `-Wcast-align` warning for Windows
-
- Seen with llvm/clang 17:
- ```
- lib/asyn-thread.c:310:5: warning: cast from 'PCHAR' (aka 'char *') to
- 'struct thread_sync_data *' increases required alignment from 1 to 8
- [-Wcast-align]
- 310 | CONTAINING_RECORD(overlapped, struct thread_sync_data, w8.overlapped);
- | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ~~~~
- .../llvm-mingw/aarch64-w64-mingw32/include/winnt.h:717:48: note: expanded
- from macro 'CONTAINING_RECORD'
- 717 | #define CONTAINING_RECORD(address,type,field) ((type *)((PCHAR)(address) - (ULONG_PTR)(&((type *)0)->field)))
- | ^~~~~~~~~~~~~~~~~~~~~~
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ```
-
- Follow-up to a6bbc87f9e9ffb46a1801dfb983e7534825ed56b #12482
-
- Ref: https://github.com/curl/curl/pull/12482#issuecomment-1873017261
- Closes #12615
-
-Daniel Stenberg (2 Jan 2024)
-
-- tool_listhelp: regenerate after recent .d updates
-
- Makes it survive test 1478
-
- Closes #12612
-
-- test1478: verify src/tool_listhelp.c
-
- Verify that the source file on disk is identical to the output of gen.pl
- listhelp, as otherwise they are out of sync and need attention.
-
- Closes #12612
-
-- testutil: make runtests support %include
-
- Using this instruction, a test case can include the contents of a file
- into the test during the preprocessing.
-
- Closes #12612
-
-- runtests: for mode="text" on <stdout>, fix newlines on both parts
-
- Closes #12612
-
-Jay Satiro (2 Jan 2024)
-
-- quiche: return CURLE_HTTP3 on send to invalid stream
-
- Prior to this change if a send failed on a stream in an invalid state
- (according to quiche) and not marked as closed (according to libcurl)
- then the send function would return CURLE_SEND_ERROR.
-
- We already have similar code for ngtcp2 to return CURLE_HTTP3 in this
- case.
-
- Caught by test test_07_upload.py: test_07_22_upload_parallel_fail.
-
- Fixes https://github.com/curl/curl/issues/12590
- Closes https://github.com/curl/curl/pull/12597
-
-Daniel Stenberg (1 Jan 2024)
-
-- cmdline-opts: update availability for the *-ca-native options
-
- Closes #12613
-
-Patrick Monnerat (31 Dec 2023)
-
-- openldap: fix STARTTLS
-
- It was not working anymore since introduction of connection filters.
-
- Also do not attempt to recover from a failing TLS negotiation with
- CURLUSESSL_TRY.
-
- Closes #12610
-
-Daniel Stenberg (31 Dec 2023)
-
-- haproxy-clientip.d: document the arg
-
- The arg keyword was missing and therefore not present in the man page.
-
- Closes #12611
-
-annalee (29 Dec 2023)
-
-- configure: fix no default int compile error in ipv6 detection
-
- Closes #12607
-
-Dan Fandrich (28 Dec 2023)
-
-- CI: Fix use of any-glob-to-all-files in the labeler
-
- Despite its name, this atom acts like one-glob-to-all-files and a
- different syntax with braces must be used to get
- any-glob-to-all-files semantics. Unfortunately, this makes the file
- completely unreadable.
-
- Ref: https://github.com/actions/labeler/issues/731
-
-Daniel Stenberg (29 Dec 2023)
-
-- CURLOPT_AUTOREFERER.3: mention CURLINFO_REFERER
-
-- CURLINFO_REFERER.3: clarify that it is the *request* header
-
- That libcurl itself sent in the most recent request
-
- Closes #12605
-
-Jay Satiro (28 Dec 2023)
-
-- system_win32: fix a function pointer assignment warning
-
- - Use CURLX_FUNCTION_CAST to suppress a function pointer assignment
- warning.
-
- a6bbc87f added lookups of some Windows API functions and then cast them
- like `*(FARPROC*)&Curl_funcname = address`. Some versions of gcc warn
- about that as breaking strict-aliasing rules so this PR changes those
- assignments to use CURLX_FUNCTION_CAST.
-
- Bug: https://github.com/curl/curl/pull/12581#issuecomment-1869804317
- Reported-by: Marcel Raad
-
- Closes https://github.com/curl/curl/pull/12602
-
-- verify-examples.pl: fail verification on unescaped backslash
-
- - Check that all backslashes in EXAMPLE are properly escaped.
-
- e.g. the manpage must always use `\\n`, never `\n`.
-
- This is because the manpage requires we always double a backslash to show
- a single backslash. Prior to this change an erroneous single backslash
- would pass through and compile even though it would not show correctly
- in the manpage.
-
- Co-authored-by: Daniel Stenberg
-
- Ref: https://github.com/curl/curl/pull/12588
-
- Closes https://github.com/curl/curl/pull/12589
-
-- vtls: fix missing multissl version info
-
- - Fix erroneous buffer copy logic from ff74cef5.
-
- Prior to this change the MultiSSL version info returned to the user
- was empty.
-
- Closes https://github.com/curl/curl/pull/12599
-
-Daniel Stenberg (27 Dec 2023)
-
-- KNOWN_BUGS: [RTSP] Some methods do not support response bodies
-
- Closes #12414
-
-Patrick Monnerat (27 Dec 2023)
-
-- openldap: fix an LDAP crash
-
- Reported-by: Ozan Cansel
- Fixes #12593
- Closes #12600
-
-Daniel Stenberg (27 Dec 2023)
-
-- getinfo: CURLINFO_QUEUE_TIME_T
-
- Returns the time, in microseconds, during which this transfer was held
- in a waiting queue before it started "for real". A transfer might be put
- in a queue if, after getting started, it cannot create a new connection
- etc. due to conditions and limits imposed by the application.
-
- Ref: #12293
- Closes #12368
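-
- A short usage sketch; it assumes 'curl' is an easy handle that just
- completed a transfer:
-
- ```
- #include <stdio.h>
- #include <curl/curl.h>
-
- static void print_queue_time(CURL *curl)
- {
-   curl_off_t queue_us = 0;
-   if(curl_easy_getinfo(curl, CURLINFO_QUEUE_TIME_T, &queue_us) == CURLE_OK)
-     printf("queued for %" CURL_FORMAT_CURL_OFF_T " microseconds\n",
-            queue_us);
- }
- ```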
-
-- RELEASE-NOTES: synced
-
-Jay Satiro (26 Dec 2023)
-
-- examples/sendrecv: fix comment line length
-
- Caught by checksrc.
-
-Haydar Alaidrus (23 Dec 2023)
-
-- CURLOPT_POSTFIELDS.3: fix incorrect C string escape in example
-
- - Escape inner quotes with two backslashes.
-
- Two backslashes escapes the backslash for the man page and will show as
- a single backslash.
-
- eg: "{\\"name\\": \\"daniel\\"}" shows as "{\"name\": \"daniel\"}".
-
- Closes https://github.com/curl/curl/pull/12588
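-
- For reference, the C call that the man page example renders to; the handle
- name is assumed:
-
- ```
- #include <curl/curl.h>
-
- /* what the corrected EXAMPLE compiles to in plain C */
- static void set_json_body(CURL *curl)
- {
-   curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "{\"name\": \"daniel\"}");
- }
- ```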
-
-Viktor Szakats (23 Dec 2023)
-
-- appveyor: tidy-ups
-
- - replace two remaining backslashes with forward slashes.
- - tidy up the way we form and pass `TFLAGS`.
-
- Follow-up to 2d4d0c1fd32f5cc3f946c407c8eccd5477b287df #12572
-
- Closes #12582
-
-Stefan Eissing (22 Dec 2023)
-
-- transfer: fix upload rate limiting, add test cases
-
- - add test cases for rate limiting uploads for all
- http versions
- - fix transfer loop handling of limits. Signal a re-receive
- attempt only on exhausting maxloops without an EAGAIN
- - fix `data->state.selectbits` forcing re-receive to also
- set re-sending when transfer is doing this.
-
- Reported-by: Karthikdasari0423 on github
- Fixes #12559
- Closes #12586
-
-Daniel Stenberg (22 Dec 2023)
-
-- mbedtls: free the entropy when threaded
-
- The entropy_free was never done for threaded builds, causing a small
- (fixed) memory leak.
-
- Reported-by: RevaliQaQ on github
- Fixes #12584
- Closes #12585
-
-Stefan Eissing (22 Dec 2023)
-
-- http2: improved on_stream_close/data_done handling
-
- - there seems to be a code path that cleans up easy handles without
- triggering DONE or DETACH events to the connection filters. This
- would explain why nghttp2 still holds stream user data
- - add GOOD check to easy handle used in on_close_callback to
- prevent crashes, ASSERTs in debug builds.
- - NULL the stream user data early before submitting RST
- - add checks in on_stream_close() to identify UNGOOD easy handles
-
- Reported-by: Hans-Christian Egtvedt
- Fixes #10936
- Closes #12562
-
-Daniel Stenberg (22 Dec 2023)
-
-- mprintf: overhaul and bugfixes
-
- In a test case using lots of snprintf() calls using many commonly used
- %-codes per call, this version is around 30% faster than previous
- version.
-
- It also fixes the #12561 bug which made it not behave correctly when
- given unknown %-sequences. Fixing that flaw required a different take on
- the problem, which resulted in the new two-arrays model.
-
- lib557: extended - Verify the #12561 fix and test more printf features
-
- unit1398: fix test: It used a <num>$ only for one argument, which is not
- supported.
-
- Fixes #12561
- Closes #12563
-
-Viktor Szakats (21 Dec 2023)
-
-- appveyor: replace PowerShell with bash + parallel autotools
-
- PowerShell works (after a steep development curve), but one property of
- it stuck and kept causing unresolvable usability issues: With
- `$ErrorActionPreference=Stop`, it does abort on failures, but shows only
- the first line of the error message. In `Continue` mode, it shows the
- full error message, but doesn't stop on all errors. Another issue is
- PowerShell considering any stderr output as if the command failed (this
- has been improved in 7.2 (2021-Nov), but fixed versions aren't running
- in CI and will not be for a long time in all test images.)
-
- Thus, we're going with bash.
-
- Also:
- - use `-j2` with autotools tests, making them finish 5-15 minutes per
- job faster.
- - omit `POSIX_PATH_PREFIX`.
- - use `WINDIR`.
- - prefer forward slashes.
-
- Follow-up to: 75078a415d9c769419aed4153d3d525a8eba95af #11999
- Ref: #12444
-
- Fixes #12560
- Closes #12572
-
-Pavel Pavlov (21 Dec 2023)
-
-- asyn-thread: use GetAddrInfoExW on >= Windows 8
-
- For doing async DNS resolution instead of starting a thread for each
- request.
-
- Fixes #12481
- Closes #12482
-
-Daniel Stenberg (21 Dec 2023)
-
-- strerror: repair get_winsock_error()
-
- It would try to read beyond the provided string and crash.
-
- Follow-up to ff74cef5d4a0cf60106517a1c7384
- Reported-by: calvin2021y on github
- Fixes #12578
- Closes #12579
-
-- CURLOPT_SSH_*_KEYFILE: clarify
-
- Closes #12554
-
-ivanfywang (21 Dec 2023)
-
-- ngtcp2: put h3 at the front of alpn
-
- Closes #12576
-
-Daniel Stenberg (21 Dec 2023)
-
-- test460: verify a command line using --expand with no argument
-
- This verifies the fix for #12565
-
-- tool_getparam: do not try to expand without an argument
-
- This would lead to a segfault.
-
- Fixes #12565
- Reported-by: Geeknik Labs
- Closes #12575
-
-- RELEASE-NOTES: synced
-
- Bumped version to 8.6.0 because of changes
-
-- Makefile.am: fix the MSVC project generation
-
- It made the vcxproj files not get included in dist tarballs.
-
- Regression since 74423b5df4c8117891eb89 (8.5.0)
-
- Reported-by: iAroc on github
- Fixes #12564
- Closes #12567
-
-zengwei2000 (21 Dec 2023)
-
-- altsvc: free 'as' when returning error
-
- Closes #12570
-
- Signed-off-by: zengwei <zengwei1@uniontech.com>
-
-Viktor Szakats (20 Dec 2023)
-
-- build: fix `-Wconversion`/`-Wsign-conversion` warnings
-
- Fix remaining warnings in examples and tests which are not suppressed
- by the pragma in `lib/curl_setup.h`.
-
- Silence a toolchain issue causing warnings in `FD_SET()` calls with
- older Cygwin/MSYS2 builds. Likely fixed on 2020-08-03 by:
- https://cygwin.com/git/?p=newlib-cygwin.git;a=commitdiff;h=5717262b8ecfed0f7fab63e2c09c78991e36f9dd
-
- Follow-up to 2dbe75bd7f3c36837aa06fd87a442bdf3fb7faef #12492
-
- Closes #12557
-
-- build: fix some `-Wsign-conversion`/`-Warith-conversion` warnings
-
- - enable `-Wsign-conversion` warnings, but also set them to not
- raise errors.
- - fix `-Warith-conversion` warnings seen in CI.
- These are triggered by `-Wsign-conversion` and cause errors unless
- explicitly silenced. It makes more sense to fix them; there are just a few
- of them.
- - fix some `-Wsign-conversion` warnings.
- - hide `-Wsign-conversion` warnings with a `#pragma`.
- - add macro `CURL_WARN_SIGN_CONVERSION` to unhide them on a per-build
- basis.
- - update a CI job to unhide them with the above macro:
- https://github.com/curl/curl/actions/workflows/linux.yml -> OpenSSL -O3
-
- Closes #12492
-
-- cmake: tidy-up `OtherTests.cmake`
-
- - make more obvious which detection uses which prep steps.
- - merge and streamline conditions.
- - these should not alter detection results.
-
- Also align log output messages from
- `Macros.cmake` / `curl_internal_test` with rest of the build.
-
- Closes #12551
-
-- appveyor: switch to out-of-tree builds
-
- With cmake and autotools.
-
- Closes #12550
-
-Daniel Stenberg (19 Dec 2023)
-
-- DEPRECATE.md: mention that NTLM_WB no longer works
-
- Ref: #12479
- Closes #12553
-
-- CURLOPT_SERVER_RESPONSE_TIMEOUT_MS: add
-
- Proposed-by: Yifei Kong
- Ref: https://curl.se/mail/lib-2023-11/0023.html
- Closes #12369
-
-Viktor Szakats (18 Dec 2023)
-
-- build: more `-Wformat` fixes
-
- - memdebug: update to not trigger `-Wformat-nonliteral` warnings.
- - imap: mark `imap_sendf()` with `CURL_PRINTF()`.
- - tool_msgs: mark static function with `CURL_PRINTF()`.
-
- Follow-up to 3829759bd042c03225ae862062560f568ba1a231 #12489
-
- Closes #12540
-
-- windows: delete redundant headers
-
- `winsock2.h` pulls in `windows.h`. `ws2tcpip.h` pulls in `winsock2.h`.
- `winsock2.h` and `ws2tcpip.h` are also pulled by `curl/curl.h`.
-
- Keep only those headers that are not already included, or where the code
- under them uses something from that specific header.
-
- Closes #12539
-
-- cmake: prefill/cache `HAVE_STRUCT_SOCKADDR_STORAGE`
-
- Also add missing include to `OtherTests.cmake`. It didn't cause an issue
- because the parent already included this earlier by chance.
-
- Closes #12537
-
-Daniel Stenberg (18 Dec 2023)
-
-- runner.pm: fix perl warning when running tests
-
- Use of uninitialized value $runner::gdbthis in numeric eq (==) at
- runner.pm
-
- Follow-up from 3dcf301752a09d9
-
- Closes #12549
-
-- runtests: support -gl. Like -g but for lldb.
-
- Follow-up to 63b5748
-
- Invokes the test case via lldb instead of gdb. Since using gdb is such a
- pain on mac, using lldb is sometimes less quirky.
-
- Closes #12547
-
-- curl.h: add CURLE_TOO_LARGE
-
- A new error code to be used when an internal field grows too large, like
- when a dynbuf reaches its maximum. Previously it would return
- CURLE_OUT_OF_MEMORY for this, which is highly misleading.
-
- Ref: #12268
- Closes #12269
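-
- A rough usage sketch, assuming an already prepared easy handle 'curl', to
- show how the two conditions can now be told apart:
-
- ```
- #include <stdio.h>
- #include <curl/curl.h>
-
- static void report(CURL *curl)
- {
-   CURLcode res = curl_easy_perform(curl);
-   if(res == CURLE_TOO_LARGE)
-     fprintf(stderr, "an internal field hit its fixed size limit\n");
-   else if(res == CURLE_OUT_OF_MEMORY)
-     fprintf(stderr, "a memory allocation actually failed\n");
- }
- ```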
-
-- CI/circleci: disable MQTT in the HTTP-only build
-
- And remove the use of configure options that don't actually exist
-
- Closes #12546
-
-Yedaya Katsman (18 Dec 2023)
-
-- tests: respect $TMPDIR when creating unix domain sockets
-
- When running on termux, where $TMPDIR isn't /tmp, running the tests
- failed, since the server config tried creating sockets in /tmp, without
- checking the temp dir config. Use the TMPDIR variable that makes it find
- the correct directory everywhere [0]
-
- [0] https://perldoc.perl.org/File::Temp#tempfile
-
- Closes #12545
-
-Viktor Szakats (17 Dec 2023)
-
-- ssh: fix namespace of two local macros
-
- Avoid using the libssh and libssh2 macro namespaces by prefixing
- these local macro names with `CURL_`.
-
- Follow-up to 413a0fedd02c8c6df1d294534b8c6e306fcca7a2 #12346
-
- Reviewed-by: Daniel Stenberg
- Closes #12544
-
-- cmake: whitespace tidy-up in `OtherTests.cmake`
-
- Closes #12538
-
-Mark Sinkovics (16 Dec 2023)
-
-- cmake: fix generation for system name iOS
-
- This PR fixes a problem that happens during CMake configuration when
- `CMAKE_SYSTEM_NAME` is set to `iOS` and not `Darwin`. This value is
- available since (as far as I remember) version 3.14. The final solution
- (thanks to @vszakats) is to use `APPLE`, which contains all the Apple
- platforms: https://cmake.org/cmake/help/latest/variable/APPLE.html.
-
- This issue was found during vcpkg installation. Running the commands
- `vcpkg install curl:arm64-ios` and `vcpkg install curl:x64-ios` failed
- with the message:
- ```
- CMake Error: try_run() invoked in cross-compiling mode, please set the
- following cache variables appropriately:
- HAVE_H_ERRNO_ASSIGNABLE_EXITCODE (advanced)
- ```
- After this fix, I was able to compile the binary without
- any issue.
-
- In addition to that fix, this PR also contains a simplification to
- check if the platform is not APPLE.
-
- Co-authored-by: Viktor Szakats
- Closes #12515
-
-Daniel Stenberg (16 Dec 2023)
-
-- RELEASE-NOTES: synced
-
-Baruch Siach (16 Dec 2023)
-
-- gnutls: fix build with --disable-verbose
-
- infof() parameters must be defined event with --disable-verbose since
- commit dac293cfb702 ("lib: apache style infof and trace
- macros/functions").
-
- Move also 'ptr' definition under !CURL_DISABLE_VERBOSE_STRINGS.
-
- Fixes the following build failure:
-
- In file included from ../lib/sendf.h:29,
- from vtls/gtls.c:44:
- vtls/gtls.c: In function 'Curl_gtls_verifyserver':
- vtls/gtls.c:841:34: error: 'version' undeclared (first use in this function);
- did you mean 'session'?
- 841 | gnutls_protocol_get_name(version), ptr);
- | ^~~~~~~
-
- Closes #12505
-
-Viktor Szakats (16 Dec 2023)
-
-- build: delete unused `HAVE_{GSSHEIMDAL,GSSMIT,HEIMDAL}`
-
- Stop setting `HAVE_GSSHEIMDAL`, `HAVE_GSSMIT` and `HAVE_HEIMDAL`.
- There was no place in the build system or source code that used them.
-
- Reviewed-by: Daniel Stenberg
- Closes #12506
-
-- build: remove redundant `CURL_PULL_*` settings
-
- These macros were not propagated to the source code from CMake.
-
- autotools set only one of them (`CURL_PULL_SYS_POLL_H`), initially to
- address an AIX issue [1]. This later broke when introducing `system.h`
- [2] without the logic it enabled. A subsequent fix [3] re-added the
- logic, and also enabled it for AIX before its use, directly in
- `system.h`.
-
- [1] 2012-11-23: 665adcd4b7bcdb7deb638cdc499fbe71f8d777f2
- [2] 2017-03-29: 9506d01ee50d5908138ebad0fd9fbd39b66bd64d #1373
- [3] 2017-08-25: 8a84fcc4b59e8b78d2acc6febf44a43d6bc81b59 #1828 #1833
-
- Reviewed-by: Daniel Stenberg
- Closes #12502
-
-- system.h: sync mingw `CURL_TYPEOF_CURL_SOCKLEN_T` with other compilers
-
- Align mingw with the other Windows compilers and use the `int` type for
- `CURL_TYPEOF_CURL_SOCKLEN_T` (and thus for `curl_socklent_t`). This
- makes it unnecessary to make a mingw-specific trick and pull all Windows
- headers early just for this type definition. This type is specific to
- Windows, not to the compiler. mingw-w64's Windows header maps it to
- `int` too.
-
- With this we also delete all remaining uses of `CURL_PULL_WS2TCPIP_H`.
-
- [ The official solution is to use `socklen_t` for all Windows compilers.
- In this case we may want to update `curl/curl.h` to pull in Windows
- headers before `system.h`. ]
-
- Reviewed-by: Daniel Stenberg
- Reviewed-by: Jay Satiro
- Closes #12501
-
-- windows: simplify detecting and using system headers
-
- - autotools, cmake: assume that if we detect Windows, `windows.h`,
- `winsock2.h` and `ws2tcpip.h` do exist.
- - lib: fix 3 outlier `#if` conditions to use `USE_WINSOCK` instead of
- looking for `winsock2.h`.
- - autotools: merge 3 Windows check methods into one.
- - move Watt-32 and lwIP socket support to `setup-win32.h` from
- `config-win32.h`. It opens up using these with all build tools. Also
- merge logic with Windows Sockets.
- - fix to assume Windows sockets with the mingw32ce toolchain.
- Follow-up to: 2748c64d605b19fb419ae56810ad8da36487a2d4
- - cmake: delete unused variable `signature_call_conv` since
- eb33ccd5332435fa50f1758e5debb869c6942b7f.
- - autotools: simplify `CURL_CHECK_WIN32_LARGEFILE` detection.
- - examples/externalsocket: fix header order.
- - cmake/OtherTests.cmake: delete Windows-specific `_source_epilogue`
- that wasn't used anymore.
- - cmake/OtherTests.cmake: set `WIN32_LEAN_AND_MEAN` for test
- `SIZEOF_STRUCT_SOCKADDR_STORAGE`.
-
- After this patch curl universally uses `_WIN32` to guard
- Windows-specific logic. It guards Windows Sockets-specific logic with
- `USE_WINSOCK` (this might need further work).
-
- Reviewed-by: Jay Satiro
- Closes #12495
-
-- build: enable missing OpenSSF-recommended warnings, with fixes
-
- https://best.openssf.org/Compiler-Hardening-Guides/Compiler-Options-Hardening-Guide-for-C-and-C++.html
- as of 2023-11-29 [1].
-
- Enable new recommended warnings (except `-Wsign-conversion`):
-
- - enable `-Wformat=2` for clang (in both cmake and autotools).
- - add `CURL_PRINTF()` internal attribute and mark functions accepting
- printf arguments with it. This is a copy of existing
- `CURL_TEMP_PRINTF()` but using `__printf__` to make it compatible
- with redefining the `printf` symbol:
- https://gcc.gnu.org/onlinedocs/gcc-3.0.4/gcc_5.html#SEC94
- - fix `CURL_PRINTF()` and existing `CURL_TEMP_PRINTF()` for
- mingw-w64 and enable it on this platform.
- - enable `-Wimplicit-fallthrough`.
- - enable `-Wtrampolines`.
- - add `-Wsign-conversion` commented with a FIXME.
- - cmake: enable `-pedantic-errors` the way we do it with autotools.
- Follow-up to d5c0351055d5709da8f3e16c91348092fdb481aa #2747
- - lib/curl_trc.h: use `CURL_FORMAT()`, this also fixes it to enable format
- checks. Previously it was always disabled due to the internal `printf`
- macro.
-
- Fix them:
-
- - fix a bug where a `set_ipv6_v6only()` call was missed in builds with
- `--disable-verbose` / `CURL_DISABLE_VERBOSE_STRINGS=ON`.
- - add internal `FALLTHROUGH()` macro.
- - replace obsolete fall-through comments with `FALLTHROUGH()`.
- - fix fallthrough markups: Delete redundant ones (showing up as
- warnings in most cases). Add missing ones. Fix indentation.
- - silence `-Wformat-nonliteral` warnings with llvm/clang.
- - fix one `-Wformat-nonliteral` warning.
- - fix new `-Wformat` and `-Wformat-security` warnings.
- - fix `CURL_FORMAT_SOCKET_T` value for mingw-w64. Also move its
- definition to `lib/curl_setup.h` allowing use in `tests/server`.
- - lib: fix two wrongly passed string arguments in log outputs.
- Co-authored-by: Jay Satiro
- - fix new `-Wformat` warnings on mingw-w64.
-
- [1] https://github.com/ossf/wg-best-practices-os-developers/blob/56c0fde3895bfc55c8a973ef49a2572c507b2ae1/docs/Compiler-Hardening-Guides/Compiler-Options-Hardening-Guide-for-C-and-C%2B%2B.md
-
- Closes #12489
-
-- Makefile.mk: drop Windows support
-
- And DLL-support with it. This leaves `Makefile.mk` for MS-DOS and Amiga.
-
- We recommend CMake instead. With unity mode it's much faster, and about
- the same without.
-
- Ref: https://github.com/curl/curl/pull/12221#issuecomment-1783761806
- Reviewed-by: Daniel Stenberg
- Closes #12224
-
-Daniel Stenberg (16 Dec 2023)
-
-- cmdline-docs: use .IP consistently
-
- Remove use of .TP and some .B. The idea is to reduce nroff syntax as
- much as possible and to use it consistently. Ultimately, we should be
- able to introduce our own easier-to-use-and-read syntax/formatting and
- convert on generation time.
-
- Closes #12535
-
-Tatsuhiko Miyagawa (16 Dec 2023)
-
-- http: fix off-by-one error in request method length check
-
- It should allow one more byte.
-
- Closes #12534
-
-Daniel Stenberg (15 Dec 2023)
-
-- curl: show ipfs and ipns as supported "protocols"
-
- They are accepted schemes in URLs passed to curl (the tool, not the
- library).
-
- Also makes curl-config show the same list.
-
- Co-Authored-by: Jay Satiro
- Reported-by: Chara White
- Bug: https://curl.se/mail/archive-2023-12/0026.html
- Closes #12508
-
-- Revert "urldata: move async resolver state from easy handle to connectdata"
-
- This reverts commit 56a4db2e4e2bcb9a0dcb75b83560a78ef231fcc8 (#12198)
-
- We want the c-ares channel to be held in the easy handle, not per
- connection - for performance.
-
- Closes #12524
-
-Viktor Szakats (15 Dec 2023)
-
-- openssl: re-match LibreSSL deinit with init
-
- Earlier we switched to use modern initialization with LibreSSL v2.7.0
- and up, but did not touch deinitialization [1]. Fix it in this patch.
-
- Regression from bec0c5bbf34369920598678161d2df8bea0e243b #11611
-
- [1] https://github.com/curl/curl/pull/11611#issuecomment-1668654014
-
- Reported-by: Mike Hommey
- Reviewed-by: Daniel Stenberg
- Fixes #12525
- Closes #12526
-
-Daniel Stenberg (14 Dec 2023)
-
-- libssh: suppress warnings without version check
-
- Define unconditionally.
-
- Follow-up from d21bd2190c46ad7fa
-
- Closes #12523
-
-- hostip: return error immediately when Curl_ip2addr() fails
-
- Closes #12522
-
-Theo (14 Dec 2023)
-
-- libssh: improve the deprecation warning dismissal
-
- The previous code was compiler dependent, and dismissed all deprecation
- warnings indiscriminately.
-
- libssh provides a way to disable the deprecation warnings for libssh only,
- and naturally this is the preferred way.
-
- This commit uses that, to prevent the erroneous hiding of potential,
- unrelated deprecation warnings.
-
- Fixes #12519
- Closes #12520
-
-Daniel Stenberg (14 Dec 2023)
-
-- test1474: removed
-
- The test was already somewhat flaky and disabled on several platforms,
- and after 1da640abb688 even more unstable.
-
-- readwrite_data: loop less
-
- This function is made to loop in order to drain incoming data
- faster. Completely removing the loop has a measurably negative impact on
- transfer speeds.
-
- Downsides with the looping include
-
- - it might call the progress callback much more seldom. Especially if
- the write callback is slow.
-
- - rate limiting becomes less exact
-
- - a single transfer might "starve out" other parallel transfers
-
- - QUIC timers for other connections can't be maintained correctly
-
- The long term fix should be to remove the loop and optimize coming back
- to avoid the transfer speed penalty.
-
- This fix lowers the max loop count to reduce the starvation problem, and
- avoids the loop completely when rate-limiting is in progress.
-
- Ref: #12488
- Ref: https://curl.se/mail/lib-2023-12/0012.html
- Closes #12504
-
-Stefan Eissing (14 Dec 2023)
-
-- lib: eliminate `conn->cselect_bits`
-
- - use `data->state.dselect_bits` everywhere instead
- - remove `bool *comeback` parameter as non-zero
- `data->state.dselect_bits` will indicate that IO is
- incomplete.
-
- Closes #12512
-
-- connect: refactor `Curl_timeleft()`
-
- - less local vars, "better" readability
- - added documentation
-
- Closes #12518
-
-Dmitry Karpov (14 Dec 2023)
-
-- cookie: avoid fopen with empty file name
-
- Closes #12514
-
-Viktor Szakats (13 Dec 2023)
-
-- tests/server: delete workaround for old-mingw
-
- mingw-w64 1.0 comes with w32api v3.12, thus doesn't need this.
-
- Follow-up to 38029101e2d78ba125732b3bab6ec267b80a0e72 #11625
-
- Reviewed-by: Jay Satiro
- Closes #12510
-
-- cmake: delete obsolete TODOs more [ci skip]
-
- - manual completed: 898b012a9bf388590c4be7f526815b5ab74feca1 #1288
- - soname completed: 5de6848f104d7cb0017080e31216265ac19d0dde #10023
- - bunch of others that are completed
- - `NTLM_WB_ENABLED` is implemented in a basic form, and now also
- scheduled for removal, so a TODO at this point isn't useful.
-
- And this 'to-check' item:
-
- Q: "The cmake build selected to run gcc with -fPIC on my box while the
- plain configure script did not."
-
- A: With CMake, since 2ebc74c36a19a1700af394c16855ce144d9878e3 #11546
- and fc9bfb14520712672b4784e8b48256fb29204011 #11627, we explicitly
- enable PIC for libcurl shared lib. Or when building libcurl for
- shared and static lib in a single pass. We do this by default for
- Windows or when enabled by the user via `SHARE_LIB_OBJECT`.
- Otherwise we don't touch this setting. Meaning the default set by
- CMake (if any) or the toolchain is used. On Debian Bookworm, this
- means that PIC is disabled for static libs by default. Some platforms
- (like macOS) have PIC enabled by default.
- autotools supports the double-pass mode only, and in that case
- CMake seems to match PIC behaviour now (as tested on Linux with gcc.)
-
- Follow-up to 5d5dfdbd1a6c40bd75e982b66f49e1fa3a7eeae7 #12500
-
- Reviewed-by: Jay Satiro
- Closes #12509
-
-Stefan Eissing (12 Dec 2023)
-
-- CLIENT-WRITERS: design and use documentation
-
- Closes #12507
-
-Viktor Szakats (12 Dec 2023)
-
-- cmake: delete obsolete TODO items [ci skip]
-
- There is always room for improvement, but CMake is up to par now with
- autotools, so there is no longer a good reason to keep around these
- inline TODO items.
-
- Answering one of the questions:
-
- Q: "The gcc command line use neither -g nor any -O options. As a
- developer, I also treasure our configure scripts's --enable-debug
- option that sets a long range of "picky" compiler options."
-
- A: CMake offers the `CMAKE_BUILD_TYPE` variable to control debug info
- and optimization level. E.g.:
- - `Release` = `-O3` + no debug info
- - `MinSizeRel` = `-Os` + no debug info
- - `Debug` = `-O0` + debug info
-
- https://stackoverflow.com/questions/48754619/what-are-cmake-build-type-debug-release-relwithdebinfo-and-minsizerel/59314670#59314670
- https://cmake.org/cmake/help/latest/manual/cmake-buildsystem.7.html#default-and-custom-configurations
-
- For picky warnings we have the `PICKY_COMPILER` options, enabled by
- default.
-
- Closes #12500
-
-Stefan Eissing (11 Dec 2023)
-
-- CONNECTION-FILTERS: update documentation
-
- Closes #12497
-
-Daniel Stenberg (11 Dec 2023)
-
-- lib: reduce use of strncpy
-
- - bearssl: select cipher without buffer copies
- - http_aws_sigv4: avoid strncpy, require exact timestamp length
- - http_aws_sigv4: use memcpy instead of strncpy
- - openssl: avoid strncpy calls
- - schannel: check for 1.3 algos without buffer copies
- - strerror: avoid strncpy calls
- - telnet: avoid strncpy, return error on too long inputs
- - vtls: avoid strncpy in multissl_version()
-
- Closes #12499
-
-- CI/distcheck: run full tests
-
- To be able to detect missing files better, this now runs the full CI
- test suite. If done before, it would have detected #12462 before
- release.
-
- Closes #12503
-
-- docs: clean up Protocols: for cmdline options
-
- ... and some other minor polish.
-
- Closes #12496
-
-- cmdline/gen: fix the sorting of the man page options
-
- They were previously sorted based on the file names, which use a .d
- extension, making "data" get placed after "data-binary" etc. Making the
- sort ignore the extention fixes the ordering.
-
- Reported-by: Boris Verkhovskiy
- Bug: https://curl.se/mail/archive-2023-12/0014.html
- Closes #12494
-
-Daniel Gustafsson (9 Dec 2023)
-
-- doh: remove unused local variable
-
- The nurl variable is no longer used during probing following
- a refactoring, so remove.
-
- Closes #12491
-
-Jay Satiro (8 Dec 2023)
-
-- build: fix Windows ADDRESS_FAMILY detection
-
- - Include winsock2.h for Windows ADDRESS_FAMILY detection.
-
- Prior to this change cmake detection didn't work because it included
- ws2def.h by itself, which is missing needed types from winsock2.h.
-
- Prior to this change autotools detection didn't work because it did not
- include any Windows header.
-
- In both cases libcurl would fall back on unsigned short as the address
- family type, which is the same as ADDRESS_FAMILY.
-
- Co-authored-by: Viktor Szakats
-
- Closes https://github.com/curl/curl/pull/12441
-
-Daniel Stenberg (8 Dec 2023)
-
-- lib: rename Curl_strndup to Curl_memdup0 to avoid misunderstanding
-
- Since the copy does not stop at a null byte, let's not call it anything
- that makes you think it works like the common strndup() function.
-
- Based on feedback from Jay Satiro, Stefan Eissing and Patrick Monnerat
-
- Closes #12490
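-
- The exact internal signature is not spelled out here; as a hedged sketch,
- a memdup0-style helper behaves roughly like this:
-
- ```
- #include <stdlib.h>
- #include <string.h>
-
- /* illustrative only: copy 'len' bytes, which may include zero bytes,
-    and always null-terminate the copy */
- static char *memdup0_sketch(const char *src, size_t len)
- {
-   char *dst = malloc(len + 1);
-   if(!dst)
-     return NULL;
-   memcpy(dst, src, len);
-   dst[len] = '\0';
-   return dst;
- }
- ```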
-
-- convsrctest.pl: removed: not used, not shipped in tarballs
-
-- tests: rename tests scripts to the test number
-
- It is hard to name the scripts sensibly. Lots of them are similarly
- named and the name did not tell which test that used them.
-
- The new approach is rather to name them based on the test number that
- runs them. Also helps us see which scripts are for individual tests
- rather than for general test infra.
-
- - badsymbols.pl -> test1167.pl
- - check-deprecated.pl -> test1222.pl
- - check-translatable-options.pl -> test1544.pl
- - disable-scan.pl -> test1165.pl
- - error-codes.pl -> test1175.pl
- - errorcodes.pl -> test1477.pl
- - extern-scan.pl -> test1135.pl
- - manpage-scan.pl -> test1139.pl
- - manpage-syntax.pl -> test1173.pl
- - markdown-uppercase.pl -> test1275.pl
- - mem-include-scan.pl -> test1132.pl
- - nroff-scan.pl -> test1140.pl
- - option-check.pl -> test1276.pl
- - options-scan.pl -> test971.pl
- - symbol-scan.pl -> test1119.pl
- - version-scan.pl -> test1177.pl
-
- Closes #12487
-
-Michał Antoniak (8 Dec 2023)
-
-- sendf: fix compiler warning with CURL_DISABLE_HEADERS_API
-
- fix MSVC warning C4189: 'htype': local variable is initialized but not
- referenced - when CURL_DISABLE_HEADERS_API is defined.
-
- Closes #12485
-
-Viktor Szakats (8 Dec 2023)
-
-- tidy-up: whitespace
-
- Closes #12484
-
-Stefan Eissing (7 Dec 2023)
-
-- test_02_download: fix parameters to test_02_27
-
- - it is a special client that only ever uses http/2
-
- Closes #12467
-
-Michał Antoniak (7 Dec 2023)
-
-- vtls: remove the Curl_cft_ssl_proxy object if CURL_DISABLE_PROXY
-
- Closes #12459
-
-Daniel Stenberg (7 Dec 2023)
-
-- lib: strndup/memdup instead of malloc, memcpy and null-terminate
-
- - bufref: use strndup
- - cookie: use strndup
- - formdata: use strndup
- - ftp: use strndup
- - gtls: use aprintf instead of malloc + strcpy * 2
- - http: use strndup
- - mbedtls: use strndup
- - md4: use memdup
- - ntlm: use memdup
- - ntlm_sspi: use strndup
- - pingpong: use memdup
- - rtsp: use strndup instead of malloc, memcpy and null-terminate
- - sectransp: use strndup
- - socks_gssapi.c: use memdup
- - vtls: use dynbuf instead of malloc, snprintf and memcpy
- - vtls: use strdup instead of malloc + memcpy
- - wolfssh: use strndup
-
- Closes #12453
-
-- strdup: remove the memchr check from Curl_strndup
-
- It makes it possible to clone a binary chunk of data.
-
- Closes #12453
-
-- ftp: handle the PORT parsing without allocation
-
- Also reduces amount of *cpy() calls.
-
- Closes #12456
-
-- RELEASE-NOTES: synced
-
- Bumped to 8.5.1
-
-- url: for disabled protocols, mention if found in redirect
-
- To help users better understand where the URL (and denied scheme) comes
- from. Also removed "in libcurl" from the message, since the disabling
- can be done by the application.
-
- The error message now says "not supported" or "disabled" depending on
- why it was denied:
-
- Protocol "hej" not supported
- Protocol "http" disabled
-
- And in redirects:
-
- Protocol "hej" not supported (in redirect)
- Protocol "http" disabled (in redirect)
-
- Reported-by: Mauricio Scheffer
- Fixes #12465
- Closes #12469
-
-Stefan Eissing (6 Dec 2023)
-
-- sectransp: make TLSCipherNameForNumber() available in non-verbose config
-
- Reported-by: Cajus Pollmeier
- Closes #12476
- Fixes #12474
-
-YX Hao (6 Dec 2023)
-
-- lib: fix variable undeclared error caused by `infof` changes
-
- `--disable-verbose` yields `CURL_DISABLE_VERBOSE_STRINGS` defined.
- `infof` isn't `Curl_nop_stmt` anymore: dac293c.
-
- Follow-up to dac293c
-
- Closes #12470
-
-Viktor Szakats (6 Dec 2023)
-
-- tidy-up: fix yamllint whitespace issues in labeler.yml
-
- Follow-up to bda212911457c6fadfbba50be61afc4ca513fa56 #12466
-
- Reviewed-by: Dan Fandrich
- Closes #12475
-
-- tidy-up: fix yamllint whitespace issues
-
- Closes #12466
-
-Chris Sauer (6 Dec 2023)
-
-- cmake: fix typo
-
- Follow-up to aace27b
- Closes #12464
-
-Daniel Stenberg (6 Dec 2023)
-
-- dist: add tests/errorcodes.pl to the tarball
-
- Used by test 1477
-
- Reported-by: Xi Ruoyao
- Follow-up to 0ca3a4ec9a7
- Fixes #12462
- Closes #12463
-
-Dan Fandrich (6 Dec 2023)
-
-- github/labeler: update a missed key in the v5 upgrade
-
- Follow-up to ce03fe3ba
-
-Version 8.5.0 (6 Dec 2023)
-
-Daniel Stenberg (6 Dec 2023)
-
-- RELEASE-NOTES: synced
-
- The curl 8.5.0 release.
-
-Dan Fandrich (5 Dec 2023)
-
-- github/labeler: switch from the beta to labeler v5
-
- Some keys were renamed and the dot option was made default.
-
- Closes #12458
-
-Daniel Stenberg (5 Dec 2023)
-
-- DEPRECATE: remove NTLM_WB in June 2024
-
- Ref: https://curl.se/mail/lib-2023-12/0010.html
-
- Closes #12451
-
-Jacob Hoffman-Andrews (4 Dec 2023)
-
-- rustls: implement connect_blocking
-
- Closes #11647
-
-Daniel Stenberg (4 Dec 2023)
-
-- examples/rtsp-options.c: add
-
- Just a bare bones RTSP example using CURLOPT_RTSP_SESSION_ID and
- CURLOPT_RTSP_REQUEST set to CURL_RTSPREQ_OPTIONS.
-
- Closes #12452
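-
- This is not the shipped example source, just a minimal sketch along the
- same lines; the URL and session id are placeholders:
-
- ```
- #include <curl/curl.h>
-
- int main(void)
- {
-   CURL *curl = curl_easy_init();
-   if(curl) {
-     curl_easy_setopt(curl, CURLOPT_URL, "rtsp://example.com/media");
-     curl_easy_setopt(curl, CURLOPT_RTSP_SESSION_ID, "mysessionid");
-     curl_easy_setopt(curl, CURLOPT_RTSP_REQUEST, (long)CURL_RTSPREQ_OPTIONS);
-     curl_easy_perform(curl);
-     curl_easy_cleanup(curl);
-   }
-   return 0;
- }
- ```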
-
-Stefan Eissing (4 Dec 2023)
-
-- ngtcp2: ignore errors on unknown streams
-
- - especially in is_alive checks on connections, we might
- see incoming packets on streams already forgotten and closed,
- leading to errors reported by nghttp3. Ignore those.
-
- Closes #12449
-
-Daniel Stenberg (4 Dec 2023)
-
-- docs: make all examples in all libcurl man pages compile
-
- Closes #12448
-
-- checksrc.pl: support #line instructions
-
- makes it identify the correct source file and line
-
-- GHA/man-examples: verify libcurl man page examples
-
-- verify-examples.pl: verify that all man page examples compile clean
-
-- RELEASE-NOTES: synced
-
-Graham Campbell (2 Dec 2023)
-
-- http3: bump ngtcp2 and nghttp3 versions
-
- nghttp3 v1.1.0
- ngtcp2 v1.1.0
-
- In docs and CI
-
- Closes #12446
-
-- CI/quiche: use `3.1.4+quic` consistently in CI workflows
-
- Closes #12447
-
-Viktor Szakats (2 Dec 2023)
-
-- test1545: disable deprecation warnings
-
- Fixes:
- https://ci.appveyor.com/project/curlorg/curl/builds/48631551/job/bhx74e0i66yrp6pk#L1205
-
- Same with details:
- https://ci.appveyor.com/project/curlorg/curl/builds/48662893/job/ol8a78q9gmilb6wt#L1263
- ```
- tests/libtest/lib1545.c:38:3: error: 'curl_formadd' is deprecated: since
- 7.56.0. Use curl_mime_init() [-Werror=deprecated-declarations]
- 38 | curl_formadd(&m_formpost, &lastptr, CURLFORM_COPYNAME, "file",
- | ^~~~~~~~~~~~
- [...]
- ```
-
- Follow-up to 07a3cd83e0456ca17dfd8c3104af7cf45b7a1ff5 #12421
-
- Fixes #12445
- Closes #12444
-
-Daniel Stenberg (2 Dec 2023)
-
-- INSTALL: update list of ports and CPU archs
-
-- symbols-in-versions: the CLOSEPOLICY options are deprecated
-
- They were used with the CURLOPT_CLOSEPOLICY option, which *never* worked.
-
-z2_ (1 Dec 2023)
-
-- build: fix builds that disable protocols but not digest auth
-
- - Build base64 functions if digest auth is not disabled.
-
- Prior to this change if some protocols were disabled but not digest auth
- then a build error would occur due to missing base64 functions.
-
- Fixes https://github.com/curl/curl/issues/12440
- Closes https://github.com/curl/curl/pull/12442
-
-Michał Antoniak (1 Dec 2023)
-
-- connect: reduce number of transportation providers
-
- Use only the ones necessary - the ones that are built-in. Saves a few
- bytes in the resulting code.
-
- Closes #12438
-
-David Benjamin (1 Dec 2023)
-
-- vtls: consistently use typedef names for OpenSSL structs
-
- The foo_st names don't appear in OpenSSL public API documentation. The
- FOO typedefs are more common. This header was already referencing
- SSL_CTX via <openssl/ssl.h>. There is a comment about avoiding
- <openssl/x509v3.h>, but OpenSSL actually declares all the typedefs in
- <openssl/ossl_typ.h>, which is already included by <openssl/ssl.h> (and
- every other OpenSSL header), so just use that. Though I've included it
- just to be explicit.
-
- (I'm also fairly sure including <openssl/ssl.h> already triggers the
- Schannel conflicts anyway. The comment was probably just out of date.)
-
- Closes #12439
-
-Lau (1 Dec 2023)
-
-- libcurl-security.3: fix typo
-
- Fixed minimal typo.
-
- Closes #12437
-
-Stefan Eissing (1 Dec 2023)
-
-- ngtcp2: fix races in stream handling
-
- - fix cases where ngtcp2 invokes callbacks on streams that
- nghttp3 has already forgotten. Ignore the NGHTTP3_ERR_STREAM_NOT_FOUND
- in these cases as it is normal behaviour.
-
- Closes #12435
-
-Emanuele Torre (1 Dec 2023)
-
-- tool_writeout_json: fix JSON encoding of non-ascii bytes
-
- char variables if unspecified can be either signed or unsigned depending
- on the platform according to the C standard; on most platforms, they are
- signed.
-
- This meant that the *i<32 was always true for bytes with the top bit
- set. So they were always getting encoded as \uXXXX, and then since they
- were also signed negative, they were getting extended with 1s causing
- '\xe2' to be expanded to \uffffffe2, for example:
-
- $ curl --variable 'v=“' --expand-write-out '{{v:json}}\n' file:///dev/null
- \uffffffe2\uffffff80\uffffff9c
-
- I fixed this bug by making the code use explicitly unsigned char*
- variables instead of char* variables.
-
- Test 268 verifies
-
- Reported-by: iconoclasthero
- Closes #12434
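-
- A stand-alone illustration of the pitfall (not the tool's code):
-
- ```
- #include <stdio.h>
-
- int main(void)
- {
-   char c = (char)0xe2;  /* top bit set */
-   unsigned char u = 0xe2;
-
-   /* where plain char is signed, c is negative, so this check wrongly
-      treats the byte as a control character */
-   if(c < 32)
-     printf("signed char: wrongly flagged, value %d\n", c);
-
-   if(u < 32)
-     printf("unsigned char: flagged\n");
-   else
-     printf("unsigned char: value %u, handled correctly\n", (unsigned int)u);
-   return 0;
- }
- ```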
-
-Stefan Eissing (1 Dec 2023)
-
-- cf-socket: TCP trace output local address used in connect
-
- Closes #12427
-
-Jay Satiro (1 Dec 2023)
-
-- CURLINFO_PRETRANSFER_TIME_T.3: fix time explanation
-
- - Change CURLINFO_PRETRANSFER_TIME_T explanation to say that it
- includes protocol-specific instructions that trigger a transfer.
-
- Prior to this change it explicitly said that it did not include those
- instructions in the time, but that is incorrect.
-
- The change is a copy of the fixed explanation already in
- CURLINFO_PRETRANSFER_TIME, fixed by ec8dcd7b.
-
- Reported-by: eeverettrbx@users.noreply.github.com
-
- Fixes https://github.com/curl/curl/issues/12431
- Closes https://github.com/curl/curl/pull/12432
-
-Daniel Stenberg (30 Nov 2023)
-
-- multi: during ratelimit multi_getsock should return no sockets
-
- ... as there is nothing to wait for then, it just waits. Otherwise, this
- causes much more CPU work and updates than necessary during ratelimit
- periods.
-
- Ref: https://curl.se/mail/lib-2023-11/0056.html
- Closes #12430
-
-Dmitry Karpov (30 Nov 2023)
-
-- transfer: abort pause send when connection is marked for closing
-
- This handles cases of some bi-directional "upgrade" scenarios
- (i.e. WebSockets) where sending is paused until some "upgrade" handshake
- is completed, but server rejects the handshake and closes the
- connection.
-
- Closes #12428
-
-Daniel Stenberg (28 Nov 2023)
-
-- RELEASE-NOTES: synced
-
-- openssl: when a session-ID is reused, skip OCSP stapling
-
- Fixes #12399
- Reported-by: Alexey Larikov
- Closes #12418
-
-- test1545: test doing curl_formadd twice with missing file
-
- Reproduces #12410
- Verifies the fix
- Closes #12421
-
-- Curl_http_body: cleanup properly when Curl_getformdata errors
-
- Reported-by: yushicheng7788 on github
- Based-on-work-by: yushicheng7788 on github
- Fixes #12410
- Closes #12421
-
-- test1477: verify that libcurl-errors.3 and public headers are synced
-
- The script errorcodes.pl extracts all error codes from all headers and
- checks that they are all documented, then checks that all documented
- error codes are also specified in a header file.
-
- Closes #12424
-
-- libcurl-errors.3: sync with current public headers
-
- Closes #12424
-
-Stefan Eissing (28 Nov 2023)
-
-- test459: fix for parallel runs
-
- - change warning message to work better with varying filename
- length.
- - adapt test output check to new formatting
-
- Follow-up to 97ccc4479f77ba3191c6
- Closes #12423
-
-Daniel Stenberg (27 Nov 2023)
-
-- tool_cb_prg: make the carriage return fit for wide progress bars
-
- When the progress bar was made max width (256 columns), the fly()
- function generated an output buffer so long that the trailing carriage
- return would not fit, and then the output would show up wrongly. The
- fly() function is called while the expected total transfer size is
- unknown, which could be one or more progress calls before the actual
- progress meter gets shown once the expected transfer size is provided.
-
- This new take also replaces the msnprintf() call with a much simpler
- memset() for speed.
-
- Reported-by: Tim Hill
- Fixes #12407
- Closes #12415
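-
- A toy illustration of the sizing issue, not the tool's code: the output
- buffer must always leave room for the trailing carriage return:
-
- ```
- #include <stdio.h>
- #include <string.h>
-
- int main(void)
- {
-   char buf[256 + 2];   /* bar characters + '\r' + terminating zero */
-   size_t width = 256;  /* the widest supported progress bar */
-
-   memset(buf, '#', width);  /* cheap fill, no snprintf() needed */
-   buf[width] = '\r';        /* must always fit within the buffer */
-   buf[width + 1] = '\0';
-   fputs(buf, stderr);
-   return 0;
- }
- ```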
-
-- tool_parsecfg: make warning output propose double-quoting
-
- When the config file parser detects a word that *probably* should be
- quoted, mention double-quotes as a possible remedy.
-
- Test 459 verifies.
-
- Proposed-by: Jiehong on github
- Fixes #12409
- Closes #12412
-
-Jay Satiro (26 Nov 2023)
-
-- curl.rc: switch out the copyright symbol for plain ASCII
-
- .. like we already do for libcurl.rc.
-
- libcurl.rc copyright symbol used to cause a "non-ascii 8-bit codepoint"
- warning so it was switched to ascii.
-
- Ref: https://github.com/curl/curl/commit/1ca62bb5#commitcomment-133474972
-
- Suggested-by: Robert Southee
-
- Closes https://github.com/curl/curl/pull/12403
-
-Daniel Stenberg (26 Nov 2023)
-
-- conncache: use the closure handle when disconnecting surplus connections
-
- Use the closure handle for disconnecting connection cache entries so
- that anything that happens during the disconnect is not stored and
- associated with the 'data' handle, which just finished a transfer.
- It is important that details from the unrelated disconnect do not
- taint meta-data in the data handle.
-
- Like storing the response code.
-
- This also adjusts test 1506. Unfortunately it also removes a key part of
- the test that verifies that a connection is closed: since this
- output vanishes (because the closure handle is used), we no longer know
- for sure that the connection actually gets closed in this test...
-
- Reported-by: ohyeaah on github
- Fixes #12367
- Closes #12405
-
-- RELEASE-NOTES: synced
-
-Stefan Eissing (24 Nov 2023)
-
-- quic: make eyeballers connect retries stop at weird replies
-
- - when a connect immediately goes into DRAINING state, do
- not attempt retries in the QUIC connection filter. Instead,
- return CURLE_WEIRD_SERVER_REPLY
- - When eyeballing, interpret CURLE_WEIRD_SERVER_REPLY as an
- inconclusive answer. When all addresses have been attempted,
- rewind the address list once on an inconclusive answer.
- - refs #11832 where connects were retried indefinitely until
- the overall timeout fired
-
- Closes #12400
-
-Daniel Stenberg (24 Nov 2023)
-
-- CI: verify libcurl function SYNOPSIS sections
-
- With the .github/scripts/verify-synopsis.pl script
-
- Closes #12402
-
-- docs/libcurl: SYNOPSIS cleanup
-
- - use the correct include file
- - make sure they are declared as in the header file
- - fix minor nroff syntax mistakes (missing .fi)
-
- These are verified by verify-synopsis.pl, which extracts the SYNOPSIS
- code and runs it through gcc.
-
- Closes #12402
-
-- sendf: fix comment typo
-
-- fopen: allocate the dir after fopen
-
- Move the allocation of the directory name down to after the fopen() call
- to allow that shortcut code path to avoid a superfluous malloc+free
- cycle.
-
- Follow-up to 73b65e94f35311
-
- Closes #12398
-
-Stefan Eissing (24 Nov 2023)
-
-- transfer: cleanup done+excess handling
-
- - add `SingleRequest->download_done` as indicator that
- all download bytes have been received
- - remove `stop_reading` bool from readwrite functions
- - move excess body handling into client download writer
-
- Closes #12371
-
-Daniel Stenberg (23 Nov 2023)
-
-- fopen: create new file using old file's mode
-
- Because the function renames the temp file to the target name as a last
- step, if the file was previously owned by a different user, not ORing in
- the old mode could end up creating a file that was no longer
- readable by the original owner after the save.
-
-  Reported-by: Loïc Yhuel
- Fixes #12299
- Closes #12395
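-
-  A minimal POSIX-flavoured sketch of the idea (helper name and base mode
-  are illustrative, not curl's actual code): stat() the existing target,
-  OR its permission bits into the mode of the fresh temp file, and only
-  then rename() it over the target.
-
-  ```c
-  #include <stdio.h>
-  #include <sys/stat.h>
-
-  /* Open a temp file that inherits the old target's permission bits, so
-     the final rename() does not lock out the original owner. */
-  static FILE *open_tmp_keeping_mode(const char *target, const char *tmpname)
-  {
-    struct stat st;
-    mode_t mode = S_IRUSR | S_IWUSR;  /* base mode for a brand new file */
-    FILE *f;
-
-    if(!stat(target, &st))
-      mode |= (st.st_mode & 0777);    /* OR in the previous permission bits */
-
-    f = fopen(tmpname, "wb");
-    if(f)
-      chmod(tmpname, mode);           /* apply the combined mode */
-    return f;
-    /* the caller writes to f, fclose()s it, then rename(tmpname, target) */
-  }
-  ```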
-
-- test1476: require proxy
-
- Follow-up from 323df4261c3542
-
- Closes #12394
-
-- fopen: create short(er) temporary file name
-
- Only using random letters in the name plus a ".tmp" extension. Not by
- appending characters to the final file name.
-
- Reported-by: Maksymilian Arciemowicz
-
- Closes #12388
-
-Stefan Eissing (23 Nov 2023)
-
-- tests: git ignore generated second-hsts.txt file
-
- File is generated in test lib1900
-
- Follow-up to 7cb03229d9e9c5
-
- Closes #12393
-
-Viktor Szakats (23 Nov 2023)
-
-- openssl: enable `infof_certstack` for 1.1 and LibreSSL 3.6
-
- Lower the barrier to enable `infof_certstack()` from OpenSSL 3 to
-  OpenSSL 1.1.x, and LibreSSL 3.6 or later.
-
- With the caveat, that "group name" and "type name" are missing from
- the log output with these TLS backends.
-
- Follow-up to b6e6d4ff8f253c8b8055bab9d4d6a10f9be109f3 #12030
-
- Reviewed-by: Daniel Stenberg
- Closes #12385
-
-Daniel Stenberg (23 Nov 2023)
-
-- urldata: fix typo in comment
-
-- CI: codespell
-
- The list of words to ignore is in the file
- .github/scripts/codespell-ignore.txt
-
- Closes #12390
-
-- lib: fix comment typos
-
- Five separate ones, found by codespell
-
- Closes #12390
-
-- test1476: verify cookie PSL mixed case
-
-- cookie: lowercase the domain names before PSL checks
-
- Reported-by: Harry Sintonen
-
- Closes #12387
-
-Viktor Szakats (23 Nov 2023)
-
-- openssl: fix building with v3 `no-deprecated` + add CI test
-
- - build quictls with `no-deprecated` in CI to have test coverage for
- this OpenSSL 3 configuration.
-
- - don't call `OpenSSL_add_all_algorithms()`, `OpenSSL_add_all_digests()`.
- The caller code is meant for OpenSSL 3, while these two functions were
- only necessary before OpenSSL 1.1.0. They are missing from OpenSSL 3
- if built with option `no-deprecated`, causing build errors:
- ```
- vtls/openssl.c:4097:3: error: call to undeclared function 'OpenSSL_add_all_
- algorithms'; ISO C99 and later do not support implicit function declaration
- s [-Wimplicit-function-declaration]
- vtls/openssl.c:4098:3: error: call to undeclared function 'OpenSSL_add_all_
- digests'; ISO C99 and later do not support implicit function declarations [
- -Wimplicit-function-declaration]
- ```
- Ref: https://ci.appveyor.com/project/curlorg/curl-for-win/builds/48587418?f
- ullLog=true#L7667
-
- Regression from b6e6d4ff8f253c8b8055bab9d4d6a10f9be109f3 #12030
- Bug: https://github.com/curl/curl/issues/12380#issuecomment-1822944669
- Reviewed-by: Alex Bozarth
-
- - vquic/curl_ngtcp2: fix using `SSL_get_peer_certificate` with
- `no-deprecated` quictls 3 builds.
- Do it by moving an existing solution for this from `vtls/openssl.c`
- to `vtls/openssl.h` and adjusting caller code.
- ```
- vquic/curl_ngtcp2.c:1950:19: error: implicit declaration of function 'SSL_g
- et_peer_certificate'; did you mean 'SSL_get1_peer_certificate'? [-Wimplicit
- -function-declaration]
- ```
- Ref: https://github.com/curl/curl/actions/runs/6960723097/job/18940818625#s
- tep:24:1178
-
- - curl_ntlm_core: fix `-Wunused-parameter`, `-Wunused-variable` and
- `-Wunused-function` when trying to build curl with NTLM enabled but
- without the necessary TLS backend (with DES) support.
-
- Closes #12384
-
-- curl.h: delete Symbian OS references
-
- curl deprecated Symbian OS in 3d64031fa7a80ac4ae3fd09a5939196268b92f81
- via #5989. Delete references to it from public headers, because there
- is no fresh release to use those headers with.
-
- Reviewed-by: Dan Fandrich
- Reviewed-by: Jay Satiro
- Closes #12378
-
-- windows: use built-in `_WIN32` macro to detect Windows
-
- Windows compilers define `_WIN32` automatically. Windows SDK headers
- or build env defines `WIN32`, or we have to take care of it. The
- agreement seems to be that `_WIN32` is the preferred practice here.
- Make the source code rely on that to detect we're building for Windows.
-
- Public `curl.h` was using `WIN32`, `__WIN32__` and `CURL_WIN32` for
- Windows detection, next to the official `_WIN32`. After this patch it
- only uses `_WIN32` for this. Also, make it stop defining `CURL_WIN32`.
-
- There is a slight chance these break compatibility with Windows
- compilers that fail to define `_WIN32`. I'm not aware of any obsolete
- or modern compiler affected, but in case there is one, one possible
- solution is to define this macro manually.
-
- grepping for `WIN32` remains useful to discover Windows-specific code.
-
- Also:
-
- - extend `checksrc` to ensure we're not using `WIN32` anymore.
-
- - apply minor formatting here and there.
-
- - delete unnecessary checks for `!MSDOS` when `_WIN32` is present.
-
- Co-authored-by: Jay Satiro
- Reviewed-by: Daniel Stenberg
-
- Closes #12376
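-
-  For illustration, the detection pattern this change standardizes on
-  (only the compiler-provided `_WIN32`, no `WIN32`/`__WIN32__`/`CURL_WIN32`):
-
-  ```c
-  #ifdef _WIN32
-  /* Windows-only code path: _WIN32 is defined by every Windows compiler */
-  #include <winsock2.h>
-  #else
-  /* everything else */
-  #include <sys/socket.h>
-  #endif
-  ```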
-
-Stefan Eissing (22 Nov 2023)
-
-- url: ConnectionExists revisited
-
- - have common pattern of `if not match, continue`
- - revert pages long if()s to return early
- - move dead connection check to later since it may
- be relatively expensive
- - check multiuse also when NOT building with NGHTTP2
- - for MULTIUSE bundles, verify that the inspected
- connection indeed supports multiplexing when in use
-    (bundles may contain a mix of connections, afaict)
-
- Closes #12373
-
-Daniel Stenberg (22 Nov 2023)
-
-- CURLMOPT_MAX_CONCURRENT_STREAMS: make sure the set value is within range
-
- ... or use the default value.
-
- Also clarify the documentation language somewhat.
-
- Closes #12382
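-
-  A rough sketch of the clamping idea; the bounds and default below are
-  illustrative, not necessarily libcurl's actual values:
-
-  ```c
-  #include <limits.h>
-
-  #define ILLUSTRATIVE_DEFAULT_STREAMS 100  /* assumed default, for the sketch */
-
-  static long clamp_max_concurrent_streams(long requested)
-  {
-    /* an out-of-range value falls back to the default instead of being kept */
-    if(requested < 1 || requested > INT_MAX)
-      return ILLUSTRATIVE_DEFAULT_STREAMS;
-    return requested;
-  }
-  ```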
-
-- urldata: make maxconnects a 32 bit value
-
- "2^32 idle connections ought to be enough for anybody"
-
- Closes #12375
-
-- FEATURES: update the URL phrasing
-
-  The URL length has been limited for a while now, so "no limit" simply is
-  not true anymore. Mention the URL RFC standard used instead.
-
- Closes #12383
-
-- wolfssh: remove redundant static prototypes
-
-  vssh/wolfssh.c:346:18: error: redundant redeclaration of ‘wscp_recv’ [-We
- rror=redundant-decls]
-
- Closes #12381
-
-- setopt: remove superfluous use of ternary expressions
-
- Closes #12374
-
-- mime: store "form escape" as a single bit
-
- Closes #12374
-
-- setopt: check CURLOPT_TFTP_BLKSIZE range on set
-
- ... instead of later when the transfer is about to happen.
-
- Closes #12374
-
-Viktor Szakats (21 Nov 2023)
-
-- build: add more picky warnings and fix them
-
- Enable more picky compiler warnings. I've found these options in the
- nghttp3 project when implementing the CMake quick picky warning
- functionality for it [1].
-
- `-Wunused-macros` was too noisy to keep around, but fixed a few issues
- it revealed while testing.
-
- - autotools: reflect the more precisely-versioned clang warnings.
- Follow-up to 033f8e2a08eb1d3102f08c4d8c8e85470f8b460e #12324
- - autotools: sync between clang and gcc the way we set `no-multichar`.
- - autotools: avoid setting `-Wstrict-aliasing=3` twice.
- - autotools: disable `-Wmissing-noreturn` for MSYS gcc targets [2].
- It triggers in libtool-generated stub code.
-
- - lib/timeval: delete a redundant `!MSDOS` guard from a `WIN32` branch.
-
- - lib/curl_setup.h: delete duplicate declaration for `fileno`.
- Added in initial commit ae1912cb0d494b48d514d937826c9fe83ec96c4d
-    (1999-12-29). This suggests it may not be needed anymore, but if it
-    is, we may restore it for those specific (non-Windows) systems.
- - lib: delete unused macro `FTP_BUFFER_ALLOCSIZE` since
- c1d6fe2aaa5a26e49a69a4f2495b3cc7a24d9394.
- - lib: delete unused macro `isxdigit_ascii` since
- f65f750742068f579f4ee6d8539ed9d5f0afcb85.
- - lib/mqtt: delete unused macro `MQTT_HEADER_LEN`.
- - lib/multi: delete unused macro `SH_READ`/`SH_WRITE`.
- - lib/hostip: add `noreturn` function attribute via new `CURL_NORETURN`
- macro.
- - lib/mprintf: delete duplicate declaration for `Curl_dyn_vprintf`.
- - lib/rand: fix `-Wunreachable-code` and related fallouts [3].
- - lib/setopt: fix `-Wunreachable-code-break`.
- - lib/system_win32 and lib/timeval: fix double declarations for
- `Curl_freq` and `Curl_isVistaOrGreater` in CMake UNITY mode [4].
- - lib/warnless: fix double declarations in CMake UNITY mode [5].
-    This was due to force-disabling the header guard of `warnless.h`
-    to reapply it to source code coming after `warnless.c` in UNITY
- builds. This reapplied declarations too, causing the warnings.
- Solved by adding a header guard for the lines that actually need
- to be reapplied.
- - lib/vauth/digest: fix `-Wunreachable-code-break` [6].
- - lib/vssh/libssh2: fix `-Wunreachable-code-break` and delete redundant
- block.
- - lib/vtls/sectransp: fix `-Wunreachable-code-break` [7].
- - lib/vtls/sectransp: suppress `-Wunreachable-code`.
- Detected in `else` branches of dynamic feature checks, with results
- known at compile-time, e.g.
- ```c
- if(SecCertificateCopySubjectSummary) /* -> true */
- ```
- Likely fixable as a separate micro-project, but given SecureTransport
- is deprecated anyway, let's just silence these locally.
- - src/tool_help: delete duplicate declaration for `helptext`.
- - src/tool_xattr: fix `-Wunreachable-code`.
- - tests: delete duplicate declaration for `unitfail` [8].
- - tests: delete duplicate declaration for `strncasecompare`.
- - tests/libtest: delete duplicate declaration for `gethostname`.
- Originally added in 687df5c8c39c370a59999b9afc0917d808d978b7
- (2010-08-02).
- Got complicated later: c49e9683b85ba9d12cbb6eebc4ab2c8dba68fbdc
- If there are still systems around with warnings, we may restore the
-    prototype, but limited to those systems.
- - tests/lib2305: delete duplicate declaration for
- `libtest_debug_config`.
- - tests/h2-download: fix `-Wunreachable-code-break`.
-
- [1] https://github.com/ngtcp2/nghttp3/blob/a70edb08e954d690e8fb2c1df999b5a056
- f8bf9f/cmake/PickyWarningsC.cmake
- [2] https://ci.appveyor.com/project/curlorg/curl/builds/48553586/job/3qkgjaui
- qla5fj45?fullLog=true#L1675
- [3] https://github.com/curl/curl/actions/runs/6880886309/job/18716044703?pr=1
- 2331#step:7:72
- https://github.com/curl/curl/actions/runs/6883016087/job/18722707368?pr=1
- 2331#step:7:109
- [4] https://ci.appveyor.com/project/curlorg/curl/builds/48555101/job/9g15qkrr
- iklpf1ut#L204
- [5] https://ci.appveyor.com/project/curlorg/curl/builds/48555101/job/9g15qkrr
- iklpf1ut#L218
- [6] https://github.com/curl/curl/actions/runs/6880886309/job/18716042927?pr=1
- 2331#step:7:290
- [7] https://github.com/curl/curl/actions/runs/6891484996/job/18746659406?pr=1
- 2331#step:9:1193
- [8] https://github.com/curl/curl/actions/runs/6882803986/job/18722082562?pr=1
- 2331#step:33:1870
-
- Closes #12331
-
-Daniel Stenberg (21 Nov 2023)
-
-- transfer: avoid unreachable expression
-
- If curl_off_t and size_t have the same size (which is common on modern
-  64 bit systems), a condition that Coverity pointed out cannot occur.
-  Avoid the warning by only compiling that code when curl_off_t
-  actually is larger.
-
- Follow-up to 1cd2f0072fa482e25baa2
-
- Closes #12370
-
-Stefan Eissing (21 Nov 2023)
-
-- transfer: readwrite improvements
-
- - changed header/chunk/handler->readwrite prototypes to accept `buf`,
- `blen` and a `pconsumed` pointer. They now get the buffer to work on
- and report back how many bytes they consumed
- - eliminated `k->str` in SingleRequest
- - improved excess data handling to properly calculate with any body data
- left in the headerb buffer
- - eliminated `k->badheader` enum to only be a bool
-
- Closes #12283
-
-Daniel Stenberg (21 Nov 2023)
-
-- RELEASE-NOTES: synced
-
-Jiří Hruška (21 Nov 2023)
-
-- transfer: avoid calling the read callback again after EOF
-
- Regression since 7f43f3dc5994d01b12 (7.84.0)
-
- Bug: https://curl.se/mail/lib-2023-11/0017.html
-
- Closes #12363
-
-Daniel Stenberg (21 Nov 2023)
-
-- doh: provide better return code for responses w/o addresses
-
- Previously it was wrongly returning CURLE_OUT_OF_MEMORY when the
- response did not contain any addresses. Now it more accurately returns
- CURLE_COULDNT_RESOLVE_HOST.
-
- Reported-by: lRoccoon on github
-
- Fixes #12365
- Closes #12366
-
-Stefan Eissing (21 Nov 2023)
-
-- HTTP/2, HTTP/3: handle detach of ongoing transfers
-
- - refs #12356 where a UAF is reported when closing a connection
- with a stream whose easy handle was cleaned up already
- - handle DETACH events same as DONE events in h2/h3 filters
-
- Fixes #12356
-  Reported-by: Paweł Wegner
- Closes #12364
-
-Viktor Szakats (20 Nov 2023)
-
-- autotools: stop setting `-std=gnu89` with `--enable-warnings`
-
-  Do not alter the C standard when building with `--enable-warnings`
-  and gcc.
-
- On one hand this alters warning results compared to a default build.
- On the other, it may produce different binaries, which is unexpected.
-
- Also fix new warnings that appeared after removing `-std=gnu89`:
-
- - include: fix public curl headers to use the correct printf mask for
- `CURL_FORMAT_CURL_OFF_T` and `CURL_FORMAT_CURL_OFF_TU` with mingw-w64
- and Visual Studio 2013 and newer. This fixes the printf mask warnings
- in examples and tests. E.g. [1]
-
- - conncache: fix printf format string [2].
-
- - http2: fix potential null pointer dereference [3].
- (seen on Slackware with gcc 11.)
-
- - libssh: fix printf format string in SFTP code [4].
- Also make MSVC builds compatible with old CRT versions.
-
- - libssh2: fix printf format string in SFTP code for MSVC.
- Applying the same fix as for libssh above.
-
- - unit1395: fix `argument is null` and related issues [5]:
- - stop calling `strcmp()` with NULL to avoid undefined behaviour.
- - fix checking results if some of them were NULL.
- - do not pass NULL to printf `%s`.
-
- - ci: keep a build job with `-std=gnu89` to continue testing for
- C89-compliance. We can apply this to other gcc jobs as needed.
- Ref: b23ce2cee7329bbf425f18b49973b7a5f23dfcb4 (2022-09-23) #9542
-
- [1] https://dev.azure.com/daniel0244/curl/_build/results?buildId=18581&view=l
- ogs&jobId=ccf9cc6d-2ef1-5cf2-2c09-30f0c14f923b
- [2] https://github.com/curl/curl/actions/runs/6896854263/job/18763831142?pr=1
- 2346#step:6:67
- [3] https://github.com/curl/curl/actions/runs/6896854253/job/18763839238?pr=1
- 2346#step:30:214
- [4] https://github.com/curl/curl/actions/runs/6896854253/job/18763838007?pr=1
- 2346#step:29:895
- [5] https://github.com/curl/curl/actions/runs/6896854253/job/18763836775?pr=1
- 2346#step:33:1689
-
- Closes #12346
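-
-  Relating to the printf mask item above, a minimal usage sketch with the
-  public `CURL_FORMAT_CURL_OFF_T` macro from the curl headers:
-
-  ```c
-  #include <stdio.h>
-  #include <curl/curl.h>
-
-  int main(void)
-  {
-    curl_off_t size = 1234567890;
-    /* the macro expands to the right length modifier for this platform */
-    printf("size: %" CURL_FORMAT_CURL_OFF_T "\n", size);
-    return 0;
-  }
-  ```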
-
-- autotools: fix/improve gcc and Apple clang version detection
-
- - Before this patch we expected `n.n` `-dumpversion` output, but Ubuntu
-    may return `n-win32` (also with `-dumpfullversion`), causing these
-    errors and failing to enable picky warnings:
- ```
- ../configure: line 23845: test: : integer expression expected
- ```
- Ref: https://github.com/libssh2/libssh2/actions/runs/6263453828/job/1700789
- 3718#step:5:143
-
- Fix that by stripping any dash-suffix and handling a dotless (major-only)
- version number by assuming `.0` in that case.
-
- `9.3-posix`, `9.3-win32`, `6`, `9.3.0`, `11`, `11.2`, `11.2.0`
- Ref: https://github.com/mamedev/mame/pull/9767
-
- - fix Apple clang version detection for releases between
- 'Apple LLVM version 7.3.0' and 'Apple LLVM version 10.0.1' where the
- version was under-detected as 3.7 llvm/clang equivalent.
-
- - fix Apple clang version detection for 'Apple clang version 11.0.0'
- and newer where the Apple clang version was detected, instead of its
- llvm/clang equivalent.
-
- - display detected clang/gcc/icc compiler version.
-
- Via libssh2:
- - https://github.com/libssh2/libssh2/commit/00a3b88c51cdb407fbbb347a2e38c5c7d
- 89875ad
- https://github.com/libssh2/libssh2/pull/1187
- - https://github.com/libssh2/libssh2/commit/89ccc83c7da73e7ca3a112e3500081319
- 42b592e
- https://github.com/libssh2/libssh2/pull/1232
-
- Closes #12362
-
-- autotools: delete LCC compiler support bits
-
- Follow-up to fd7ef00f4305a2919e6950def1cf83d0110a4acd #12222
-
- Closes #12357
-
-- cmake: add test for `DISABLE` options, add `CURL_DISABLE_HEADERS_API`
-
- - tests: verify CMake `DISABLE` options.
-
- Make an exception for 2 CMake-only ones, and one more that's
- using a different naming scheme, also in autotools and source.
-
- - cmake: add support for `CURL_DISABLE_HEADERS_API`.
-
- Suggested-by: Daniel Stenberg
- Ref: https://github.com/curl/curl/pull/12345#pullrequestreview-1736238641
-
- Closes #12353
-
-Jacob Hoffman-Andrews (20 Nov 2023)
-
-- hyper: temporarily remove HTTP/2 support
-
- The current design of the Hyper integration requires rebuilding the
- Hyper clientconn for each request. However, building the clientconn
- requires resending the HTTP/2 connection preface, which is incorrect
- from a protocol perspective. That in turn causes servers to send GOAWAY
- frames, effectively degrading performance to "no connection reuse" in
- the best case. It may also be triggering some bugs where requests get
- dropped entirely and reconnects take too long.
-
- This doesn't rule out HTTP/2 support with Hyper, but it may take a
- redesign of the Hyper integration in order to make things work.
-
- Closes #12191
-
-Jay Satiro (20 Nov 2023)
-
-- schannel: fix unused variable warning
-
- Bug: https://github.com/curl/curl/pull/12349#issuecomment-1818000846
- Reported-by: Viktor Szakats
-
- Closes https://github.com/curl/curl/pull/12361
-
-Daniel Stenberg (19 Nov 2023)
-
-- url: find scheme with a "perfect hash"
-
- Instead of a loop to scan over the potentially 30+ scheme names, this
- uses a "perfect hash" table. This works fine because the set of schemes
- is known and cannot change in a build. The hash algorithm and table size
-  are chosen so that each table entry maps to at most one scheme index.
-
- The perfect hash is generated by a separate tool (scripts/schemetable.c)
-
- Closes #12347
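-
-  An illustrative toy version of the technique; the hash constants, table
-  size and slot numbers below are made up for this sketch, not the ones
-  curl generates:
-
-  ```c
-  #include <stdint.h>
-  #include <string.h>
-
-  struct scheme { const char *name; };
-
-  /* slots chosen so each known name hashes to its own entry (toy table) */
-  static const struct scheme schemetable[16] = {
-    [5] = { "http" }, [8] = { "https" }, [15] = { "ftp" },
-  };
-
-  static uint32_t scheme_hash(const char *s, size_t len)
-  {
-    uint32_t h = 5381;
-    while(len--)
-      h = h * 33 + (unsigned char)*s++;
-    return h & 15;               /* table size is a power of two */
-  }
-
-  /* one hash plus one string compare, no loop over all scheme names */
-  static const char *scheme_lookup(const char *s, size_t len)
-  {
-    const struct scheme *e = &schemetable[scheme_hash(s, len)];
-    if(e->name && strlen(e->name) == len && !memcmp(e->name, s, len))
-      return e->name;
-    return NULL;                 /* unknown scheme */
-  }
-  ```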
-
-- scripts: add schemetable.c
-
- This tool generates a scheme-matching table.
-
- It iterates over a number of different initial and shift values in order
- to find the hash algorithm that needs the smallest possible table.
-
- The generated hash function, table and table size then needs to be used
- by the url.c:Curl_getn_scheme_handler() function.
-
-Stefan Eissing (19 Nov 2023)
-
-- vtls/vquic, keep peer name information together
-
- - add `struct ssl_peer` to keep hostname, dispname and sni
- for a filter
- - allocate `sni` for use in VTLS backend
- - eliminate `Curl_ssl_snihost()` and its use of the download buffer
- - use ssl_peer in SSL and QUIC filters
-
- Closes #12349
-
-Viktor Szakats (18 Nov 2023)
-
-- build: always revert `#pragma GCC diagnostic` after use
-
- Before this patch some source files were overriding gcc warning options,
- but without restoring them at the end of the file. In CMake UNITY builds
- these options spilled over to the remainder of the source code,
-  effectively disabling them for a larger portion of the codebase than
- intended.
-
-  `#pragma clang diagnostic` did not have this issue in the codebase.
-
- Reviewed-by: Marcel Raad
- Closes #12352
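-
-  The pattern this change enforces, sketched:
-
-  ```c
-  /* scope a warning override so it cannot leak into later translation
-     units of a CMake UNITY build */
-  #pragma GCC diagnostic push
-  #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
-  /* ... code that intentionally calls a deprecated API ... */
-
-  #pragma GCC diagnostic pop  /* always restore the previous state */
-  ```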
-
-- tidy-up: casing typos, delete unused Windows version aliases
-
- - cmake: fix casing of `UnixSockets` to match the rest of the codebase.
-
- - curl-compilers.m4: fix casing in a comment.
-
- - setup-win32: delete unused Windows version constant aliases.
-
- Reviewed-by: Marcel Raad
- Closes #12351
-
-- keylog: disable if unused
-
- Fully disable keylog code if there is no TLS or QUIC subsystem using it.
-
- Closes #12350
-
-- cmake: add `CURL_DISABLE_BINDLOCAL` option
-
- To match similar autotools option.
-
- Default is `ON`.
-
- Reviewed-by: Daniel Stenberg
- Closes #12345
-
-- url: fix `-Wzero-length-array` with no protocols
-
- Fixes:
- ```
- ./lib/url.c:178:56: warning: use of an empty initializer is a C2x extension [
- -Wc2x-extensions]
- 178 | static const struct Curl_handler * const protocols[] = {
- | ^
- ./lib/url.c:178:56: warning: zero size arrays are an extension [-Wzero-length
- -array]
- ```
-
- Closes #12344
-
-- url: fix builds with `CURL_DISABLE_HTTP`
-
- Fixes:
- ```
- ./lib/url.c:456:35: error: no member named 'formp' in 'struct UrlState'
- 456 | Curl_mime_cleanpart(data->state.formp);
- | ~~~~~~~~~~~ ^
- ```
-
- Regression from 74b87a8af13a155c659227f5acfa78243a8b2aa6 #11682
-
- Closes #12343
-
-- http: fix `-Wunused-parameter` with no auth and no proxy
-
- ```
- lib/http.c:734:26: warning: unused parameter 'proxy' [-Wunused-parameter]
- bool proxy)
- ^
- ```
-
- Reviewed-by: Marcel Raad
- Closes #12338
-
-Daniel Stenberg (16 Nov 2023)
-
-- TODO: Some TLS options are not offered for HTTPS proxies
-
- Closes #12286
- Closes #12342
-
-- RELEASE-NOTES: synced
-
-- duphandle: make dupset() not return with pointers to old alloced data
-
- As the blob pointers are to be duplicated, the function must not return
- mid-function with lingering pointers to the old handle's allocated data,
- as that would lead to double-free in OOM situations.
-
- Make sure to clear all destination pointers first to avoid this risk.
-
- Closes #12337
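-
-  A generic sketch of the pattern, with a simplified struct instead of
-  curl's real UserDefined fields:
-
-  ```c
-  #include <stdlib.h>
-  #include <string.h>
-
-  struct opts { char *blob_a; char *blob_b; };
-
-  static int dup_opts(struct opts *dst, const struct opts *src)
-  {
-    *dst = *src;         /* shallow copy of everything */
-    dst->blob_a = NULL;  /* clear pointer fields before deep-copying them, */
-    dst->blob_b = NULL;  /* so an early error cannot leave aliased pointers */
-
-    if(src->blob_a && !(dst->blob_a = strdup(src->blob_a)))
-      return 1;          /* OOM: dst->blob_b is NULL, nothing double-freed */
-    if(src->blob_b && !(dst->blob_b = strdup(src->blob_b)))
-      return 1;
-    return 0;
-  }
-  ```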
-
-Viktor Szakats (16 Nov 2023)
-
-- http: fix `-Wunused-variable` compiler warning
-
- Fix compiler warnings in builds with disabled auths, NTLM and SPNEGO.
-
- E.g. with `CURL_DISABLE_BASIC_AUTH` + `CURL_DISABLE_BEARER_AUTH` +
- `CURL_DISABLE_DIGEST_AUTH` + `CURL_DISABLE_NEGOTIATE_AUTH` +
- `CURL_DISABLE_NTLM` on non-Windows.
-
- ```
- ./curl/lib/http.c:737:12: warning: unused variable 'result' [-Wunused-variabl
- e]
- CURLcode result = CURLE_OK;
- ^
- ./curl/lib/http.c:995:18: warning: variable 'availp' set but not used [-Wunus
- ed-but-set-variable]
- unsigned long *availp;
- ^
- ./curl/lib/http.c:996:16: warning: variable 'authp' set but not used [-Wunuse
- d-but-set-variable]
- struct auth *authp;
- ^
- ```
-
- Regression from e92edfbef64448ef461117769881f3ed776dec4e #11490
-
- Fixes #12228
- Closes #12335
-
-Jay Satiro (16 Nov 2023)
-
-- tool: support bold headers in Windows
-
- - If virtual terminal processing is enabled in Windows then use ANSI
- escape codes Esc[1m and Esc[22m to turn bold on and off.
-
- Suggested-by: Gisle Vanem
-
- Ref: https://github.com/curl/curl/discussions/11770
-
- Closes https://github.com/curl/curl/pull/12321
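-
-  Sketch of the escape sequences involved, assuming the console already
-  has virtual terminal processing enabled:
-
-  ```c
-  #include <stdio.h>
-
-  static void emit_bold(const char *text, int vt_enabled)
-  {
-    if(vt_enabled)
-      printf("\x1b[1m%s\x1b[22m", text);  /* ESC[1m bold on, ESC[22m off */
-    else
-      printf("%s", text);                 /* plain output otherwise */
-  }
-  ```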
-
-Viktor Szakats (15 Nov 2023)
-
-- build: fix libssh2 + `CURL_DISABLE_DIGEST_AUTH` + `CURL_DISABLE_AWS`
-
- Builds with libssh2 + `-DCURL_DISABLE_DIGEST_AUTH=ON` +
- `-DCURL_DISABLE_AWS=ON` in combination with either Schannel on Windows,
- or `-DCURL_DISABLE_NTLM=ON` on other operating systems failed while
- compiling due to a missing HMAC declaration.
-
- The reason is that HMAC is required by `lib/sha256.c` which publishes
- `Curl_sha256it()` which is required by `lib/vssh/libssh2.c` when
- building for libssh2 v1.8.2 (2019-05-25) or older.
-
- Make sure to compile the HMAC bits for a successful build.
-
- Both HMAC and `Curl_sha256it()` rely on the same internals, so splitting
- them into separate sources isn't practical.
-
- Fixes:
- ```
- [...]
- In file included from ./curl/_x64-win-ucrt-cmake-llvm-bld/lib/CMakeFiles/libc
- url_object.dir/Unity/unity_0_c.c:310:
- ./curl/lib/sha256.c:527:42: error: array has incomplete element type 'const s
- truct HMAC_params'
- 527 | const struct HMAC_params Curl_HMAC_SHA256[] = {
- | ^
- ./curl/lib/curl_sha256.h:34:21: note: forward declaration of 'struct HMAC_par
- ams'
- [...]
- ```
-
- Regression from e92edfbef64448ef461117769881f3ed776dec4e #11490
-
- Fixes #12273
- Closes #12332
-
-Daniel Stenberg (15 Nov 2023)
-
-- duphandle: also free 'outcurl->cookies' in error path
-
- Fixes memory-leak when OOM mid-function
-
- Use plain free instead of safefree, since the entire struct is
- freed below.
-
-  Remove some free calls for data that is already freed in Curl_freeset()
-
- Closes #12329
-
-Viktor Szakats (15 Nov 2023)
-
-- config-win32: set `HAVE_SNPRINTF` for mingw-w64
-
- It's available in all mingw-w64 releases. We already pre-fill this
- detection in CMake.
-
- Closes #12325
-
-- sasl: fix `-Wunused-function` compiler warning
-
- In builds with disabled auths.
-
- ```
- lib/curl_sasl.c:266:17: warning: unused function 'get_server_message' [-Wunus
- ed-function]
- static CURLcode get_server_message(struct SASL *sasl, struct Curl_easy *data,
- ^
- 1 warning generated.
- ```
- Ref: https://github.com/curl/trurl/actions/runs/6871732122/job/18689066151#st
- ep:3:3822
-
- Reviewed-by: Daniel Stenberg
- Closes #12326
-
-- build: picky warning updates
-
- - cmake: sync some picky gcc warnings with autotools.
- - cmake, autotools: add `-Wold-style-definition` for clang too.
- - cmake: more precise version info for old clang options.
- - cmake: use `IN LISTS` syntax in `foreach()`.
-
- Reviewed-by: Daniel Stenberg
- Reviewed-by: Marcel Raad
- Closes #12324
-
-Daniel Stenberg (15 Nov 2023)
-
-- urldata: move cookielist from UserDefined to UrlState
-
- 1. Because the value is not strictly set with a setopt option.
-
- 2. Because otherwise when duping a handle when all the set.* fields are
- first copied and an error happens (think out of memory mid-function),
- the function would easily free the list *before* it was deep-copied,
- which could lead to a double-free.
-
- Closes #12323
-
-Viktor Szakats (14 Nov 2023)
-
-- autotools: avoid passing `LDFLAGS` twice to libcurl
-
-  autotools automatically passes `LDFLAGS` to linker commands. curl's
- `lib/Makefile.am` customizes libcurl linker flags. In that
- customization, it added `LDFLAGS` to the custom flags. This resulted in
- passing `LDFLAGS` _twice_ to the `libtool` command.
-
- Most of the time this is benign, but some `LDFLAGS` options can break
- the build when passed twice. One such example is passing `.o` files,
- e.g. `crt*.o` files necessary when customizing the C runtime, e.g. for
- MUSL builds.
-
- Passing them twice resulted in duplicate symbol errors:
- ```
- libtool: link: clang-15 --target=aarch64-unknown-linux-musl [...] /usr/lib/a
- arch64-linux-musl/crt1.o [...] /usr/lib/aarch64-linux-musl/crt1.o [...]
- ld.lld-15: error: duplicate symbol: _start
- >>> defined at crt1.c
- >>> /usr/lib/aarch64-linux-musl/crt1.o:(.text+0x0)
- >>> defined at crt1.c
- >>> /usr/lib/aarch64-linux-musl/crt1.o:(.text+0x0)
- [...]
- clang: error: linker command failed with exit code 1 (use -v to see invocatio
- n)
- ```
-
- This behaviour came with commit 1a593191c2769a47b8c3e4d9715ec9f6dddf5e36
- (2013-07-23) as a fix for bug https://curl.haxx.se/bug/view.cgi?id=1217.
- The patch was a works-for-me hack that ended up merged in curl:
- https://sourceforge.net/p/curl/bugs/1217/#06ef
- With the root cause remaining unclear.
-
- Perhaps the SUNPro 12 linker was sensitive to `-L` `-l` order, requiring
- `-L` first? This would be unusual and suggests a bug in either the
- linker or in `libtool`.
-
- The curl build does pass the list of detected libs via its own
- `LIBCURL_LIBS` variable, which ends up before `LDFLAGS` on the `libtool`
- command line, but it's the job of `libtool` to ensure that even
- a peculiar linker gets the options in the expected order. Also because
- autotools passes `LDFLAGS` last, making it hardly possible to pass
- anything after it.
-
- Perhaps in the 10 years since this issue, this already got a fix
- upstream.
-
- This patch deletes `LDFLAGS` from our customized libcurl options,
- leaving a single copy of them as passed by autotools automatically.
-
- Reverts 1a593191c2769a47b8c3e4d9715ec9f6dddf5e36
- Closes #12310
-
-- autotools: accept linker flags via `CURL_LDFLAGS_{LIB,BIN}`
-
- To allow passing `LDFLAGS` specific to libcurl (`CURL_LDFLAGS_LIB`) and
- curl tool (`CURL_LDFLAGS_BIN`).
-
- This makes it possible to build libcurl and curl with a single
- invocation with lib- and tool-specific custom linker flags.
-
-  Such flags can enable `.map` files, a `.def` file for the libcurl DLL,
-  control static/shared builds, incl. requesting a static curl tool (with
- `-static-libtool-libs`) while building both shared and static libcurl.
-
- curl-for-win uses the above and some more.
-
- These options are already supported in `Makefile.mk`. CMake has built-in
- variables for this.
-
- Closes #12312
-
-Jay Satiro (14 Nov 2023)
-
-- tool_cb_hdr: add an additional parsing check
-
- - Don't dereference the past-the-end element when parsing the server's
- Content-disposition header.
-
- As 'p' is advanced it can point to the past-the-end element and prior
- to this change 'p' could be dereferenced in that case.
-
- Technically the past-the-end element is not out of bounds because dynbuf
- (which manages the header line) automatically adds a null terminator to
- every buffer and that is not included in the buffer length passed to
- the header callback.
-
- Closes https://github.com/curl/curl/pull/12320
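-
-  The shape of the guard, in a generic form rather than the actual
-  tool_cb_hdr code:
-
-  ```c
-  #include <stddef.h>
-
-  /* scan for a character inside [p, end) without dereferencing 'end' */
-  static const char *find_char(const char *p, const char *end, char c)
-  {
-    while(p < end && *p != c)  /* bound checked before the dereference */
-      p++;
-    return (p < end) ? p : NULL;
-  }
-  ```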
-
-Philip Heiduck (14 Nov 2023)
-
-- .cirrus.yml: freebsd 14
-
-  Ensure curl works on the latest FreeBSD version.
-
- Closes #12053
-
-Daniel Stenberg (13 Nov 2023)
-
-- easy: in duphandle, init the cookies for the new handle
-
- ... not the source handle.
-
- Closes #12318
-
-- duphandle: use strdup to clone *COPYPOSTFIELDS if size is not set
-
- Previously it would unconditionally use the size, which is set to -1
- when strlen is requested.
-
- Updated test 544 to verify.
-
- Closes #12317
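-
-  A simplified sketch of the duplication rule (the function name is
-  illustrative):
-
-  ```c
-  #include <stdlib.h>
-  #include <string.h>
-  #include <curl/curl.h>
-
-  /* size == -1 means "use strlen()", so clone as a C string; otherwise
-     the data may be binary and exactly 'size' bytes must be copied */
-  static char *clone_postfields(const char *src, curl_off_t size)
-  {
-    if(size == -1)
-      return strdup(src);
-    else {
-      char *copy = malloc((size_t)size);
-      if(copy)
-        memcpy(copy, src, (size_t)size);
-      return copy;
-    }
-  }
-  ```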
-
-- RELEASE-NOTES: synced
-
-- curl_easy_duphandle.3: clarify how HSTS and alt-svc are duped
-
- Closes #12315
-
-- urldata: move hstslist from 'set' to 'state'
-
- To make it work properly with curl_easy_duphandle(). This, because
- duphandle duplicates the entire 'UserDefined' struct by plain copy while
- 'hstslist' is a linked curl_list of file names. This would lead to a
- double-free when the second of the two involved easy handles were
- closed.
-
- Closes #12315
-
-- test1900: verify duphandle with HSTS using multiple files
-
- Closes #12315
-
-Goro FUJI (13 Nov 2023)
-
-- http: allow longer HTTP/2 request method names
-
- - Increase the maximum request method name length from 11 to 23.
-
- For HTTP/1.1 and earlier there's not a specific limit in libcurl for
- method length except that it is limited by the initial HTTP request
- limit (DYN_HTTP_REQUEST). Prior to fc2f1e54 HTTP/2 was treated the same
- and there was no specific limit.
-
- According to Internet Assigned Numbers Authority (IANA) the longest
- registered method is UPDATEREDIRECTREF which is 17 characters.
-
- Also there are unregistered methods used by some companies that are
- longer than 11 characters.
-
- The limit was originally added by 61f52a97 but not used until fc2f1e54.
-
- Ref: https://www.iana.org/assignments/http-methods/http-methods.xhtml
-
- Closes https://github.com/curl/curl/pull/12311
-
-Jay Satiro (12 Nov 2023)
-
-- CURLOPT_CAINFO_BLOB.3: explain what CURL_BLOB_COPY does
-
- - Add an explanation of the CURL_BLOB_COPY flag to CURLOPT_CAINFO_BLOB
- and CURLOPT_PROXY_CAINFO_BLOB docs.
-
- All the other _BLOB option docs already have the same explanation.
-
- Closes https://github.com/curl/curl/pull/12277
-
-Viktor Szakats (11 Nov 2023)
-
-- tidy-up: dedupe Windows system libs in cmake
-
- Reviewed-by: Daniel Stenberg
- Closes #12307
-
-Junho Choi (11 Nov 2023)
-
-- ci: test with latest quiche release (0.19.0)
-
- Closes #12180
-
-- quiche: use quiche_conn_peer_transport_params()
-
-  In recent quiche, the transport parameter API is separated out into
-  quiche_conn_peer_transport_params().
-  (https://github.com/cloudflare/quiche/pull/1575)
-  This breaks building with the latest (post 0.18.0) quiche.
-
- Closes #12180
-
-Daniel Stenberg (11 Nov 2023)
-
-- Makefile: generate the VC 14.20 project files at dist-time
-
- Follow-up to 28287092cc5a6d6ef8 (#12282)
-
- Closes #12290
-
-Sam James (11 Nov 2023)
-
-- misc: fix -Walloc-size warnings
-
- GCC 14 introduces a new -Walloc-size included in -Wextra which gives:
-
- ```
-  src/tool_operate.c: In function ‘add_per_transfer’:
-  src/tool_operate.c:213:5: warning: allocation of insufficient size ‘1’
-  for type ‘struct per_transfer’ with size ‘480’ [-Walloc-size]
-    213 | p = calloc(sizeof(struct per_transfer), 1);
-    | ^
-  src/var.c: In function ‘addvariable’:
-  src/var.c:361:5: warning: allocation of insufficient size ‘1’ for type
-  ‘struct var’ with size ‘32’ [-Walloc-size]
- 361 | p = calloc(sizeof(struct var), 1);
- | ^
- ```
-
- The calloc prototype is:
- ```
- void *calloc(size_t nmemb, size_t size);
- ```
-
- So, just swap the number of members and size arguments to match the
- prototype, as we're initialising 1 struct of size `sizeof(struct
- ...)`. GCC then sees we're not doing anything wrong.
-
- Closes #12292
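-
-  A tiny standalone illustration of the argument order:
-
-  ```c
-  #include <stdlib.h>
-
-  struct item { int id; char buf[64]; };
-
-  int main(void)
-  {
-    /* warned: calloc(sizeof(struct item), 1);  (size passed as nmemb) */
-    struct item *p = calloc(1, sizeof(struct item)); /* nmemb, then size */
-    free(p);
-    return 0;
-  }
-  ```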
-
-Mark Gaiser (11 Nov 2023)
-
-- IPFS: bugfixes
-
- - Fixed endianness bug in gateway file parsing
- - Use IPFS_PATH in tests where IPFS_DATA was used
- - Fixed typos from traling -> trailing
- - Fixed broken link in IPFS.md
-
- Follow-up to 859e88f6533f9e
-
- Reported-by: Michael Kaufmann
- Bug: https://github.com/curl/curl/pull/12152#issuecomment-1798214137
- Closes #12305
-
-Daniel Stenberg (11 Nov 2023)
-
-- VULN-DISCLOSURE-POLICY: remove broken link to hackerone
-
- It should ideally soon not be done from hackerone anyway
-
- Closes #12308
-
-Andrew Kurushin (11 Nov 2023)
-
-- schannel: add CA cache support for files and memory blobs
-
- - Support CA bundle and blob caching.
-
- Cache timeout is 24 hours or can be set via CURLOPT_CA_CACHE_TIMEOUT.
-
- Closes https://github.com/curl/curl/pull/12261
-
-Daniel Stenberg (10 Nov 2023)
-
-- RELEASE-NOTES: synced
-
-Charlie C (10 Nov 2023)
-
-- cmake: option to disable install & drop `curlu` target when unused
-
- This patch makes the following changes:
- - adds the option `CURL_DISABLE_INSTALL` - to disable 'install' targets.
- - Removes the target `curlu` when the option `BUILD_TESTING` is set to
- `OFF` - to prevent it from being loaded in Visual Studio.
-
- Closes #12287
-
-Kai Pastor (10 Nov 2023)
-
-- cmake: fix multiple include of CURL package
-
- Fixes errors on second `find_package(CURL)`. This is a frequent case
- with transitive dependencies:
- ```
- CMake Error at ...:
- add_library cannot create ALIAS target "CURL::libcurl" because another
- target with the same name already exists.
- ```
-
- Test to reproduce:
- ```cmake
- cmake_minimum_required(VERSION 3.27) # must be 3.18 or higher
-
- project(curl)
-
- set(CURL_DIR "example/lib/cmake/CURL/")
- find_package(CURL CONFIG REQUIRED)
- find_package(CURL CONFIG REQUIRED) # fails
-
- add_executable(main main.c)
- target_link_libraries(main CURL::libcurl)
- ```
-
- Ref: https://cmake.org/cmake/help/latest/release/3.18.html#other-changes
- Ref: https://cmake.org/cmake/help/v3.18/policy/CMP0107.html
- Ref: #12300
- Assisted-by: Harry Mallon
- Closes #11913
-
-Viktor Szakats (8 Nov 2023)
-
-- tidy-up: use `OPENSSL_VERSION_NUMBER`
-
- Uniformly use `OPENSSL_VERSION_NUMBER` to check for OpenSSL version.
- Before this patch some places used `OPENSSL_VERSION_MAJOR`.
-
- Also fix `lib/md4.c`, which included `opensslconf.h`, but that doesn't
- define any version number in these implementations: BoringSSL, AWS-LC,
- LibreSSL, wolfSSL. (Only in mainline OpenSSL/quictls). Switch that to
- `opensslv.h`. This wasn't causing a deeper problem because the code is
- looking for v3, which is only provided by OpenSSL/quictls as of now.
-
- According to https://github.com/openssl/openssl/issues/17517, the macro
- `OPENSSL_VERSION_NUMBER` is safe to use and not deprecated.
-
- Reviewed-by: Marcel Raad
- Closes #12298
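-
-  For reference, the uniform check now looks along these lines (the
-  version constant is just an example):
-
-  ```c
-  #include <openssl/opensslv.h>
-
-  #if OPENSSL_VERSION_NUMBER >= 0x30000000L
-  /* OpenSSL 3.x (or a fork reporting as such) specific code */
-  #else
-  /* older OpenSSL or other forks */
-  #endif
-  ```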
-
-Daniel Stenberg (8 Nov 2023)
-
-- resolve.d: drop a multi use-sentence
-
- Since the `multi:` keyword adds that message.
-
-  Reported-by: 積丹尼 Dan Jacobson
- Fixes https://github.com/curl/curl/discussions/12294
- Closes #12295
-
-- content_encoding: make Curl_all_content_encodings allocless
-
- - Fixes a memory leak pointed out by Coverity
- - Also found by OSS-Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?
- id=63947
-  - Avoids unnecessary allocations
-
-  Follow-up to ad051e1cbec68b2456a22661b
-
- Closes #12289
-
-Michael Kaufmann (7 Nov 2023)
-
-- vtls: use ALPN "http/1.1" for HTTP/1.x, including HTTP/1.0
-
- Some servers don't support the ALPN protocol "http/1.0" (e.g. IIS 10),
-  so avoid it and use "http/1.1" instead.
-
- This reverts commit df856cb5c9 (#10183).
-
- Fixes #12259
- Closes #12285
-
-Daniel Stenberg (7 Nov 2023)
-
-- Makefile.am: drop vc10, vc11 and vc12 projects from dist
-
-  They are end-of-life products. Support for generating them remains in the
-  repo for a while, but this change drops them from distribution.
-
- Closes #12288
-
-David Suter (7 Nov 2023)
-
-- projects: add VC14.20 project files
-
- Windows projects included VC14, VC14.10, VC14.30 but not VC14.20.
-  OpenSSL and wolfSSL scripts mention VC14.20 so I don't see a reason why
- this is missing. Updated the templates to produce a VC14.20 project.
- Project opens in Visual Studio 2019 as expected.
-
- Closes #12282
-
-Daniel Stenberg (7 Nov 2023)
-
-- curl: move IPFS code into src/tool_ipfs.[ch]
-
- - convert ensure_trailing into ensure_trailing_slash
- - strdup the URL string to own it proper
- - use shorter variable names
- - combine some expressions
- - simplify error handling in ipfs_gateway()
- - add MAX_GATEWAY_URL_LEN + proper bailout if maximum is reached
- - ipfs-gateway.d polish and simplification
- - shorten ipfs error message + make them "synthetic"
-
- Closes #12281
-
-Viktor Szakats (6 Nov 2023)
-
-- build: delete support bits for obsolete Windows compilers
-
-  - Pelles C: Unclear status; we failed to obtain a fresh copy a few months
-    ago and the possible website is HTTP-only. ~10 years ago I stopped using
-    this compiler after dealing with crashes and other issues, with no
-    response on the forum for years. It has seen some activity in curl back
-    in 2021.
- - LCC: Last stable release in September 2002.
- - Salford C: Misses winsock2 support, possibly abandoned? Last mentioned
- in 2006.
- - Borland C++: We dropped Borland C++ support in 2018.
- - MS Visual C++ 6.0: Released in 1998. curl already requires VS 2010
- (or possibly 2008) as a minimum.
-
- Closes #12222
-
-- build: delete `HAVE_STDINT_H` and `HAVE_INTTYPES_H`
-
- We use `stdint.h` unconditionally in all places except one. These uses
- are imposed by external dependencies / features. nghttp2, quic, wolfSSL
- and `HAVE_MACH_ABSOLUTE_TIME` do require this C99 header. It means that
- any of these features make curl require a C99 compiler. (In case of
- MSVC, this means Visual Studio 2010 or newer.)
-
- This patch changes the single use of `stdint.h` guarded by
- `HAVE_STDINT_H` to use `stdint.h` unconditionally. Also stop using
- `inttypes.h` as an alternative there. `HAVE_INTTYPES_H` wasn't used
- anywhere else, allowing to delete this feature check as well.
-
- Closes #12275
-
-Daniel Stenberg (6 Nov 2023)
-
-- tool_operate: do not mix memory models
-
- Make sure 'inputpath' only points to memory allocated by libcurl so that
- curl_free works correctly.
-
- Pointed out by Coverity
-
- Follow-up to 859e88f6533f9e1f890
-
- Closes #12280
-
-Stefan Eissing (6 Nov 2023)
-
-- lib: client writer, part 2, accounting + logging
-
- This PR has these changes:
-
-  Renaming of unencode_* to cwriter, i.e. client writers
- - documentation of sendf.h functions
- - move max decode stack checks back to content_encoding.c
- - define writer phase which was used as order before
- - introduce phases for monitoring inbetween decode phases
- - offering default implementations for init/write/close
-
-  Add type parameter to client writer's do_write()
- - always pass all writes through the writer stack
- - writers who only care about BODY data will pass other writes unchanged
-
- add RAW and PROTOCOL client writers
- - RAW used for Curl_debug() logging of CURLINFO_DATA_IN
- - PROTOCOL used for updates to data->req.bytecount, max_filesize checks and
- Curl_pgrsSetDownloadCounter()
- - remove all updates of data->req.bytecount and calls to
- Curl_pgrsSetDownloadCounter() and Curl_debug() from other code
- - adjust test457 expected output to no longer see the excess write
-
- Closes #12184
-
-Daniel Stenberg (6 Nov 2023)
-
-- VULN-DISCLOSURE-POLICY: escape sequences are not a security flaw
-
- Closes #12278
-
-Viktor Szakats (6 Nov 2023)
-
-- rand: fix build error with autotools + LibreSSL
-
- autotools unexpectedly detects `arc4random` because it is also looking
- into dependency libs. One dependency, LibreSSL, happens to publish an
- `arc4random` function (via its shared lib before v3.7, also via static
- lib as of v3.8.2). When trying to use this function in `lib/rand.c`,
-  its prototype is missing. To fix that, curl included a prototype, but
- that used a C99 type without including `stdint.h`, causing:
-
- ```
- ../../lib/rand.c:37:1: error: unknown type name 'uint32_t'
- 37 | uint32_t arc4random(void);
- | ^
- 1 error generated.
- ```
-
- This patch improves this by dropping the local prototype and instead
-  limiting `arc4random` use to non-OpenSSL builds. OpenSSL builds provide
- their own random source anyway.
-
- The better fix would be to teach autotools to not link dependency libs
- while detecting `arc4random`.
-
- LibreSSL publishing a non-namespaced `arc4random` tracked here:
- https://github.com/libressl/portable/issues/928
-
- Regression from 755ddbe901cd0c921fbc3ac5b3775c0dc683bc73 #10672
-
- Reviewed-by: Daniel Stenberg
- Fixes #12257
- Closes #12274
-
-Daniel Stenberg (5 Nov 2023)
-
-- RELEASE-NOTES: synced
-
-- strdup: do Curl_strndup without strncpy
-
- To avoid (false positive) gcc-13 compiler warnings.
-
- Follow-up to 4855debd8a2c1cb
-
- Assisted-by: Jay Satiro
- Reported-by: Viktor Szakats
- Fixes #12258
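-
-  A minimal sketch of a strndup-style helper that copies with memcpy()
-  instead of strncpy(), sidestepping the truncation warnings:
-
-  ```c
-  #include <stdlib.h>
-  #include <string.h>
-
-  static char *my_strndup(const char *src, size_t len)
-  {
-    char *dst = malloc(len + 1);
-    if(!dst)
-      return NULL;
-    memcpy(dst, src, len);  /* copy exactly len bytes */
-    dst[len] = '\0';        /* and terminate explicitly */
-    return dst;
-  }
-  ```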
-
-Enno Boland (5 Nov 2023)
-
-- HTTP: fix empty-body warning
-
- This change fixes a compiler warning with gcc-12.2.0 when
- `-DCURL_DISABLE_BEARER_AUTH=ON` is used.
-
- /home/tox/src/curl/lib/http.c: In function 'Curl_http_input_auth':
- /home/tox/src/curl/lib/http.c:1147:12: warning: suggest braces around emp
- ty body in an 'else' statement [-Wempty-body]
- 1147 | ;
- | ^
-
- Closes #12262
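-
-  A standalone illustration of the fix: give the empty else branch braces
-  instead of a bare ';'.
-
-  ```c
-  #include <stdio.h>
-
-  static void pick(int have_auth)
-  {
-    if(have_auth)
-      puts("auth picked");
-    else {
-      /* intentionally empty: braces keep gcc's -Wempty-body quiet */
-    }
-  }
-
-  int main(void)
-  {
-    pick(0);
-    return 0;
-  }
-  ```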
-
-Daniel Stenberg (5 Nov 2023)
-
-- openssl: identify the "quictls" backend correctly
-
- Since vanilla OpenSSL does not support the QUIC API I think it helps
- users to identify the correct OpenSSL fork in version output. The best
- (crude) way to do that right now seems to be to check if ngtcp2 support
- is enabled.
-
- Closes #12270
-
-Mark Gaiser (5 Nov 2023)
-
-- curl: improved IPFS and IPNS URL support
-
-  Previously just ipfs://<cid> and ipns://<cid> were supported, which is
-  too strict for some use cases.
-
- This patch allows paths and query arguments to be used too.
- Making this work according to normal http semantics:
-
- ipfs://<cid>/foo/bar?key=val
- ipns://<cid>/foo/bar?key=val
-
- The gateway url support is changed.
- It now only supports gateways in the form of:
-
- http://<gateway>/foo/bar
- http://<gateway>
-
- Query arguments here are explicitly not allowed and trigger an intended
- malformed url error.
-
-  There also was a crash when IPFS_PATH was set with a non-trailing
-  forward slash. This has been fixed.
-
- Lastly, a load of test cases have been added to verify the above.
-
- Reported-by: Steven Allen
- Fixes #12148
- Closes #12152
-
-Harry Mallon (5 Nov 2023)
-
-- docs: KNOWN_BUGS cleanup
-
- * Remove other mention of hyper memory-leaks from `KNOWN_BUGS`.
- Should have been removed in 629723ecf22a8eae78d64cceec2f3bdae703ec95
-
- * Remove mention of aws-sigv4 sort query string from `KNOWN_BUGS`.
- Fixed in #11806
-
- * Remove mention of aws-sigv4 query empty value problems
-
- * Remove mention of aws-sigv4 missing amz-content-sha256
- Fixed in #9995
-
-- http_aws_sigv4: canonicalise valueless query params
-
- Fixes #8107
- Closes #12244
-
-Michael Kaufmann (4 Nov 2023)
-
-- docs: preserve the modification date when copying the prebuilt man page
-
- The previously built man page "curl.1" must be copied with the original
- modification date, otherwise the man page is never updated.
-
- This fixes a bug that has been introduced with commit 2568441cab.
-
- Reviewed-by: Dan Fandrich
- Reviewed-by: Daniel Stenberg
-
- Closes #12199
-
-Daniel Stenberg (4 Nov 2023)
-
-- docs: remove bold from some man page SYNOPSIS sections
-
- In the name of consistency
-
- Closes #12267
-
-- openssl: two multi pointer checks should probably rather be asserts
-
-  ... so add the asserts now and consider removing the dynamic checks in
-  the future.
-
- Ref: #12261
- Closes #12264
-
-boilingoden (4 Nov 2023)
-
-- docs: add supported version for the json write-out
-
- xref: https://curl.se/changes.html#7_70_0
-
- Closes #12266
diff --git a/libs/libcurl/docs/THANKS b/libs/libcurl/docs/THANKS
index 7910255eda..cb276cd0bd 100644
--- a/libs/libcurl/docs/THANKS
+++ b/libs/libcurl/docs/THANKS
@@ -68,6 +68,7 @@ Aleksandar Milivojevic
Aleksander Mazur
Aleksandr Krotov
Aleksey Tulinov
+alervd on github
Ales Mlakar
Ales Novak
Alessandro Ghedini
@@ -93,6 +94,7 @@ Alex Nichols
Alex Potapenko
Alex Rousskov
Alex Samorukov
+Alex Snast
Alex Suykov
Alex Vinnik
Alex Xu
@@ -109,6 +111,7 @@ Alexander Krasnostavsky
Alexander Lazic
Alexander Pepper
Alexander Peslyak
+Alexander Shtuchkin
Alexander Sinditskiy
Alexander Traud
Alexander V. Tikhonov
@@ -209,6 +212,8 @@ Andrés García
Andy Alt
Andy Cedilnik
Andy Fiddaman
+Andy Pan
+Andy Reitz
Andy Serpa
Andy Stamp
Andy Tsouladze
@@ -270,6 +275,7 @@ AtariDreams on github
Ates Goral
atjg on github
Augustus Saunders
+Aurélien Pierre
Austin Green
av223119 on github
Avery Fay
@@ -340,6 +346,7 @@ Bertrand Demiddelaer
Bertrand Simonnet
beslick5 on github
Bevan Weiss
+Bhanu Prakash
Bill Doyle
Bill Egert
Bill Hoffman
@@ -404,6 +411,7 @@ Brian Green
Brian Inglis
Brian J. Murrell
Brian Lund
+brian m. carlson
Brian Nixon
Brian Prodoehl
Brian R Duffy
@@ -497,6 +505,7 @@ Christian Fillion
Christian Grothoff
Christian Heimes
Christian Hesse
+Christian Heusel
Christian Hägele
Christian Krause
Christian Kurz
@@ -534,6 +543,7 @@ Clifford Wolf
Clint Clayton
Cloudogu Siebels
Clément Notin
+CMD
cmfrolick on github
codesniffer13 on github
Cody Jones
@@ -714,6 +724,7 @@ Denis Laxalde
Denis Ollier
Dennis Clarke
Dennis Felsing
+dependabot[bot]
Derek Higgins
Derzsi Dániel
Desmond O. Chang
@@ -768,16 +779,19 @@ Dmitry S. Baikov
Dmitry Tretyakov
Dmitry Wagin
dnivras on github
+dogma
Dolbneff A.V
Domen Kožar
Domenico Andreoli
Dominick Meglio
Dominik Hölzl
Dominik Klemba
+Dominik Piątkowski
Dominik Thalhammer
Dominique Leuenberger
Don J Olmstead
Dongliang Mu
+Dorian Craps
Doron Behar
Doug Kaufman
Doug Porter
@@ -838,6 +852,7 @@ Eli Schwartz
Elia Tufarolo
Elliot Killick
Elliot Saba
+Elliott Balsley
Ellis Pritchard
Elmira A Semenova
Elms
@@ -1071,6 +1086,7 @@ Grigory Entin
Grisha Levit
Guenole Bescon
Guido Berhoerster
+Guilherme Puida
Guillaume Algis
Guillaume Arluison
guitared on github
@@ -1236,6 +1252,7 @@ Jakub Bochenski
Jakub Jelen
Jakub Wilk
Jakub Zakrzewski
+James Abbatiello
James Atwill
James Brown
James Bursa
@@ -1293,6 +1310,7 @@ Javier Navarro
Javier Sixto
Jay Austin
Jay Dommaschk
+Jay Guerette
Jay Wu
Jayesh A Shah
Jaz Fresh
@@ -1354,6 +1372,7 @@ Jesse Noller
Jesse Tan
jethrogb on github
jhoyla on github
+Jiang Wenjian
Jiawen Geng
Jie He
Jiehong on github
@@ -1460,6 +1479,7 @@ Jonatan Lander
Jonatan Vela
Jonathan Cardoso Machado
Jonathan Hseu
+Jonathan Matthews
Jonathan Moerman
Jonathan Nieder
Jonathan Perkin
@@ -1557,6 +1577,7 @@ Kari Pahula
Karl Chen
Karl Moerder
Karol Pietrzak
+kartatz
Kartatz on Github
Karthikdasari0423
Karthikdasari0423 on github
@@ -1566,6 +1587,7 @@ Katie Wang
Katsuhiko YOSHIDA
Kazuho Oku
kchow-FTNT on github
+Keerthi Timmaraju
Kees Cook
Kees Dekker
Keitagit-kun on github
@@ -1675,6 +1697,7 @@ Lawrence Wagerfield
Leah Neukirchen
Lealem Amedie
Leandro Coutinho
+Lee Li
LeeRiva
Legoff Vincent
Lehel Bernadt
@@ -1748,6 +1771,7 @@ Luke Amery
Luke Call
Luke Dashjr
Luke Granger-Brown
+Luke Hamburg
Lukáš Zaoral
luminixinc on github
Luo Jinghua
@@ -1865,6 +1889,7 @@ Martin Jansen
Martin Kammerhofer
Martin Kepplinger
Martin Lemke
+Martin Peck
Martin Schmatz
Martin Skinner
Martin Staael
@@ -1874,6 +1899,7 @@ Martin V
Martin Vejnár
Martin Waleczek
Martin Ågren
+martinevsky
Marty Kuhrt
Maruko
Masaya Suzuki
@@ -1915,6 +1941,7 @@ Matthew Whitehead
Matthias Bolte
Matthias Gatto
Matthias Naegler
+Matthieu Baerts
Mattias Fornander
Matus Uzak
Maurice Barnum
@@ -2051,6 +2078,7 @@ momala454 on github
Momoka Yamamoto
MonkeybreadSoftware on github
moohoorama on github
+Morgan Willcock
Morten Minde Neergaard
Mostyn Bramley-Moore
Moti Avrahami
@@ -2175,6 +2203,7 @@ Oliver Schindler
Oliver Urbann
oliverpool on github
Olivier Berger
+Olivier Bonaventure
Olivier Brunel
Omar Ramadan
omau on github
@@ -2429,6 +2458,7 @@ Renaud Lehoux
Rene Bernhardt
Rene Rebe
renovate[bot]
+renovate[bot]
Reuven Wachtfogel
RevaliQaQ on github
Reza Arbab
@@ -2615,6 +2645,7 @@ Sascha Zengler
Satadru Pramanik
Satana de Sant'Ana
Saul good
+saurabhsingh-dev on github
Saurav Babu
sayrer on github
SBKarr on github
@@ -2634,6 +2665,7 @@ Sean McArthur
Sean Miller
Sean Molenaar
Sebastiaan van Erk
+Sebastian Andersson
Sebastian Haglund
Sebastian Mundry
Sebastian Neubauer
@@ -2663,6 +2695,7 @@ Sergio Mijatovic
Sergio-IME on github
sergio-nsk on github
Serj Kalichev
+Sertonix
SerusDev on github
Seshubabu Pasam
Seth Mos
@@ -2946,6 +2979,7 @@ Tommy Chiang
Tommy Odom
Tommy Petty
Tommy Tam
+tomy2105 on github
Ton Voon
Toni Moreno
Tony Kelman
@@ -3040,6 +3074,7 @@ Volker Schmid
Vsevolod Novikov
vshmuk on hackerone
vulnerabilityspotter on hackerone
+vuonganh1993 on github
vvb2060
vvb2060 on github
Vyron Tsingaras
@@ -3135,6 +3170,7 @@ Yves Arrouye
Yves Lejeune
YX Hao
z2-2z on github
+z2_
z2_ on hackerone
Zachary Seguin
Zdenek Pavlas
@@ -3173,5 +3209,7 @@ zzq1015 on github
加藤郁之
南宫雪珊
左潇峰
+李四
梦终无痕
積丹尼 Dan Jacobson
+罗朝辉
diff --git a/libs/libcurl/include/README.md b/libs/libcurl/include/README.md
index 23f798eddc..e824788818 100644
--- a/libs/libcurl/include/README.md
+++ b/libs/libcurl/include/README.md
@@ -8,7 +8,7 @@ SPDX-License-Identifier: curl
Public include files for libcurl, external users.
-They're all placed in the curl subdirectory here for better fit in any kind of
+They are all placed in the curl subdirectory here for better fit in any kind of
environment. You must include files from here using...
#include <curl/curl.h>
diff --git a/libs/libcurl/include/curl/curl.h b/libs/libcurl/include/curl/curl.h
index 48e28f543b..b7016055eb 100644
--- a/libs/libcurl/include/curl/curl.h
+++ b/libs/libcurl/include/curl/curl.h
@@ -34,24 +34,32 @@
#endif
/* Compile-time deprecation macros. */
-#if defined(__GNUC__) && \
- ((__GNUC__ > 12) || ((__GNUC__ == 12) && (__GNUC_MINOR__ >= 1 ))) && \
+#if (defined(__GNUC__) && \
+ ((__GNUC__ > 12) || ((__GNUC__ == 12) && (__GNUC_MINOR__ >= 1 ))) || \
+ defined(__IAR_SYSTEMS_ICC__)) && \
!defined(__INTEL_COMPILER) && \
!defined(CURL_DISABLE_DEPRECATION) && !defined(BUILDING_LIBCURL)
#define CURL_DEPRECATED(version, message) \
__attribute__((deprecated("since " # version ". " message)))
+#if defined(__IAR_SYSTEMS_ICC__)
+#define CURL_IGNORE_DEPRECATION(statements) \
+ _Pragma("diag_suppress=Pe1444") \
+ statements \
+ _Pragma("diag_default=Pe1444")
+#else
#define CURL_IGNORE_DEPRECATION(statements) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
statements \
_Pragma("GCC diagnostic pop")
+#endif
#else
#define CURL_DEPRECATED(version, message)
#define CURL_IGNORE_DEPRECATION(statements) statements
#endif
#include "curlver.h" /* libcurl version defines */
-#include "system.h" /* determine things run-time */
+#include "system.h" /* determine things runtime */
#include <stdio.h>
#include <limits.h>
@@ -69,7 +77,7 @@
#if !(defined(_WINSOCKAPI_) || defined(_WINSOCK_H) || \
defined(__LWIP_OPT_H__) || defined(LWIP_HDR_OPT_H))
/* The check above prevents the winsock2 inclusion if winsock.h already was
- included, since they can't co-exist without problems */
+ included, since they cannot co-exist without problems */
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
@@ -189,9 +197,9 @@ struct curl_httppost {
files */
long flags; /* as defined below */
-/* specified content is a file name */
+/* specified content is a filename */
#define CURL_HTTPPOST_FILENAME (1<<0)
-/* specified content is a file name */
+/* specified content is a filename */
#define CURL_HTTPPOST_READFILE (1<<1)
/* name is only stored pointer do not free in formfree */
#define CURL_HTTPPOST_PTRNAME (1<<2)
@@ -207,8 +215,8 @@ struct curl_httppost {
/* use size in 'contentlen', added in 7.46.0 */
#define CURL_HTTPPOST_LARGE (1<<7)
- char *showfilename; /* The file name to show. If not set, the
- actual file name will be used (if this
+ char *showfilename; /* The filename to show. If not set, the
+ actual filename will be used (if this
is a file part) */
void *userp; /* custom pointer used for
HTTPPOST_CALLBACK posts */
@@ -350,13 +358,13 @@ typedef long (*curl_chunk_bgn_callback)(const void *transfer_info,
download of an individual chunk finished.
Note! After this callback was set then it have to be called FOR ALL chunks.
Even if downloading of this chunk was skipped in CHUNK_BGN_FUNC.
- This is the reason why we don't need "transfer_info" parameter in this
+ This is the reason why we do not need "transfer_info" parameter in this
callback and we are not interested in "remains" parameter too. */
typedef long (*curl_chunk_end_callback)(void *ptr);
/* return codes for FNMATCHFUNCTION */
#define CURL_FNMATCHFUNC_MATCH 0 /* string corresponds to the pattern */
-#define CURL_FNMATCHFUNC_NOMATCH 1 /* pattern doesn't match the string */
+#define CURL_FNMATCHFUNC_NOMATCH 1 /* pattern does not match the string */
#define CURL_FNMATCHFUNC_FAIL 2 /* an error occurred */
/* callback type for wildcard downloading pattern matching. If the
@@ -368,7 +376,7 @@ typedef int (*curl_fnmatch_callback)(void *ptr,
/* These are the return codes for the seek callbacks */
#define CURL_SEEKFUNC_OK 0
#define CURL_SEEKFUNC_FAIL 1 /* fail the entire transfer */
-#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking can't be done, so
+#define CURL_SEEKFUNC_CANTSEEK 2 /* tell libcurl seeking cannot be done, so
libcurl might try other means instead */
typedef int (*curl_seek_callback)(void *instream,
curl_off_t offset,
@@ -451,7 +459,7 @@ typedef curlioerr (*curl_ioctl_callback)(CURL *handle,
#ifndef CURL_DID_MEMORY_FUNC_TYPEDEFS
/*
* The following typedef's are signatures of malloc, free, realloc, strdup and
- * calloc respectively. Function pointers of these types can be passed to the
+ * calloc respectively. Function pointers of these types can be passed to the
* curl_global_init_mem() function to set user defined memory management
* callback routines.
*/
@@ -539,17 +547,17 @@ typedef enum {
CURLE_WRITE_ERROR, /* 23 */
CURLE_OBSOLETE24, /* 24 - NOT USED */
CURLE_UPLOAD_FAILED, /* 25 - failed upload "command" */
- CURLE_READ_ERROR, /* 26 - couldn't open/read from file */
+ CURLE_READ_ERROR, /* 26 - could not open/read from file */
CURLE_OUT_OF_MEMORY, /* 27 */
CURLE_OPERATION_TIMEDOUT, /* 28 - the timeout time was reached */
CURLE_OBSOLETE29, /* 29 - NOT USED */
CURLE_FTP_PORT_FAILED, /* 30 - FTP PORT operation failed */
CURLE_FTP_COULDNT_USE_REST, /* 31 - the REST command failed */
CURLE_OBSOLETE32, /* 32 - NOT USED */
- CURLE_RANGE_ERROR, /* 33 - RANGE "command" didn't work */
+ CURLE_RANGE_ERROR, /* 33 - RANGE "command" did not work */
CURLE_HTTP_POST_ERROR, /* 34 */
CURLE_SSL_CONNECT_ERROR, /* 35 - wrong when connecting with SSL */
- CURLE_BAD_DOWNLOAD_RESUME, /* 36 - couldn't resume download */
+ CURLE_BAD_DOWNLOAD_RESUME, /* 36 - could not resume download */
CURLE_FILE_COULDNT_READ_FILE, /* 37 */
CURLE_LDAP_CANNOT_BIND, /* 38 */
CURLE_LDAP_SEARCH_FAILED, /* 39 */
@@ -573,9 +581,9 @@ typedef enum {
CURLE_RECV_ERROR, /* 56 - failure in receiving network data */
CURLE_OBSOLETE57, /* 57 - NOT IN USE */
CURLE_SSL_CERTPROBLEM, /* 58 - problem with the local certificate */
- CURLE_SSL_CIPHER, /* 59 - couldn't use specified cipher */
+ CURLE_SSL_CIPHER, /* 59 - could not use specified cipher */
CURLE_PEER_FAILED_VERIFICATION, /* 60 - peer's certificate or fingerprint
- wasn't verified fine */
+ was not verified fine */
CURLE_BAD_CONTENT_ENCODING, /* 61 - Unrecognized/bad encoding */
CURLE_OBSOLETE62, /* 62 - NOT IN USE since 7.82.0 */
CURLE_FILESIZE_EXCEEDED, /* 63 - Maximum file size exceeded */
@@ -604,7 +612,7 @@ typedef enum {
CURLE_SSL_SHUTDOWN_FAILED, /* 80 - Failed to shut down the SSL
connection */
CURLE_AGAIN, /* 81 - socket is not ready for send/recv,
- wait till it's ready and try again (Added
+ wait till it is ready and try again (Added
in 7.18.2) */
CURLE_SSL_CRL_BADFILE, /* 82 - could not load CRL file, missing or
wrong format (Added in 7.19.0) */
@@ -763,7 +771,7 @@ typedef CURLcode (*curl_conv_callback)(char *buffer, size_t length);
typedef CURLcode (*curl_ssl_ctx_callback)(CURL *curl, /* easy handle */
void *ssl_ctx, /* actually an OpenSSL
- or WolfSSL SSL_CTX,
+ or wolfSSL SSL_CTX,
or an mbedTLS
mbedtls_ssl_config */
void *userptr);
@@ -780,7 +788,7 @@ typedef enum {
CURLPROXY_SOCKS5 = 5, /* added in 7.10 */
CURLPROXY_SOCKS4A = 6, /* added in 7.18.0 */
CURLPROXY_SOCKS5_HOSTNAME = 7 /* Use the SOCKS5 protocol but pass along the
- host name rather than the IP address. added
+ hostname rather than the IP address. added
in 7.18.0 */
} curl_proxytype; /* this enum was added in 7.10 */
@@ -860,7 +868,7 @@ enum curl_khstat {
CURLKHSTAT_FINE_ADD_TO_FILE,
CURLKHSTAT_FINE,
CURLKHSTAT_REJECT, /* reject the connection, return an error */
- CURLKHSTAT_DEFER, /* do not accept it, but we can't answer right now.
+ CURLKHSTAT_DEFER, /* do not accept it, but we cannot answer right now.
Causes a CURLE_PEER_FAILED_VERIFICATION error but the
connection will be left intact etc */
CURLKHSTAT_FINE_REPLACE, /* accept and replace the wrong key */
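
The curl_khstat values above are what a CURLOPT_SSH_KEYFUNCTION callback returns when libcurl compares an SCP/SFTP host key against the known-hosts file. A hedged sketch; the callback name and the trust-on-first-use policy are illustrative, not curl defaults:

#include <curl/curl.h>

static int my_khcallback(CURL *easy, const struct curl_khkey *knownkey,
                         const struct curl_khkey *foundkey,
                         enum curl_khmatch match, void *clientp)
{
  (void)easy; (void)knownkey; (void)foundkey; (void)clientp;
  switch(match) {
  case CURLKHMATCH_OK:
    return CURLKHSTAT_FINE;              /* key matches the stored one */
  case CURLKHMATCH_MISSING:
    return CURLKHSTAT_FINE_ADD_TO_FILE;  /* trust on first use */
  default:
    return CURLKHSTAT_REJECT;            /* changed key: fail the connect */
  }
}

/* curl_easy_setopt(curl, CURLOPT_SSH_KNOWNHOSTS, "known_hosts");
   curl_easy_setopt(curl, CURLOPT_SSH_KEYFUNCTION, my_khcallback); */
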
@@ -1080,7 +1088,7 @@ typedef CURLSTScode (*curl_hstswrite_callback)(CURL *easy,
#define CURLOPT(na,t,nu) na = t + nu
#define CURLOPTDEPRECATED(na,t,nu,v,m) na CURL_DEPRECATED(v,m) = t + nu
-/* CURLOPT aliases that make no run-time difference */
+/* CURLOPT aliases that make no runtime difference */
/* 'char *' argument to a string with a trailing zero */
#define CURLOPTTYPE_STRINGPOINT CURLOPTTYPE_OBJECTPOINT
@@ -1147,7 +1155,7 @@ typedef enum {
*
* For large file support, there is also a _LARGE version of the key
* which takes an off_t type, allowing platforms with larger off_t
- * sizes to handle larger files. See below for INFILESIZE_LARGE.
+ * sizes to handle larger files. See below for INFILESIZE_LARGE.
*/
CURLOPT(CURLOPT_INFILESIZE, CURLOPTTYPE_LONG, 14),
@@ -1180,7 +1188,7 @@ typedef enum {
*
* Note there is also a _LARGE version of this key which uses
* off_t types, allowing for large file offsets on platforms which
- * use larger-than-32-bit off_t's. Look below for RESUME_FROM_LARGE.
+ * use larger-than-32-bit off_t's. Look below for RESUME_FROM_LARGE.
*/
CURLOPT(CURLOPT_RESUME_FROM, CURLOPTTYPE_LONG, 21),
@@ -1316,9 +1324,9 @@ typedef enum {
/* Set the interface string to use as outgoing network interface */
CURLOPT(CURLOPT_INTERFACE, CURLOPTTYPE_STRINGPOINT, 62),
- /* Set the krb4/5 security level, this also enables krb4/5 awareness. This
- * is a string, 'clear', 'safe', 'confidential' or 'private'. If the string
- * is set but doesn't match one of these, 'private' will be used. */
+ /* Set the krb4/5 security level, this also enables krb4/5 awareness. This
+ * is a string, 'clear', 'safe', 'confidential' or 'private'. If the string
+ * is set but does not match one of these, 'private' will be used. */
CURLOPT(CURLOPT_KRBLEVEL, CURLOPTTYPE_STRINGPOINT, 63),
/* Set if we should verify the peer in ssl handshake, set 1 to verify. */
@@ -1350,16 +1358,16 @@ typedef enum {
/* 73 = OBSOLETE */
/* Set to explicitly use a new connection for the upcoming transfer.
- Do not use this unless you're absolutely sure of this, as it makes the
+ Do not use this unless you are absolutely sure of this, as it makes the
operation slower and is less friendly for the network. */
CURLOPT(CURLOPT_FRESH_CONNECT, CURLOPTTYPE_LONG, 74),
/* Set to explicitly forbid the upcoming transfer's connection to be reused
- when done. Do not use this unless you're absolutely sure of this, as it
+ when done. Do not use this unless you are absolutely sure of this, as it
makes the operation slower and is less friendly for the network. */
CURLOPT(CURLOPT_FORBID_REUSE, CURLOPTTYPE_LONG, 75),
- /* Set to a file name that contains random data for libcurl to use to
+ /* Set to a filename that contains random data for libcurl to use to
seed the random engine when doing SSL connects. */
CURLOPTDEPRECATED(CURLOPT_RANDOM_FILE, CURLOPTTYPE_STRINGPOINT, 76,
7.84.0, "Serves no purpose anymore"),
@@ -1386,8 +1394,8 @@ typedef enum {
* provided hostname. */
CURLOPT(CURLOPT_SSL_VERIFYHOST, CURLOPTTYPE_LONG, 81),
- /* Specify which file name to write all known cookies in after completed
- operation. Set file name to "-" (dash) to make it go to stdout. */
+ /* Specify which filename to write all known cookies in after completed
+ operation. Set filename to "-" (dash) to make it go to stdout. */
CURLOPT(CURLOPT_COOKIEJAR, CURLOPTTYPE_STRINGPOINT, 82),
/* Specify which SSL ciphers to use */
@@ -1486,7 +1494,7 @@ typedef enum {
CURLOPT(CURLOPT_HTTPAUTH, CURLOPTTYPE_VALUES, 107),
/* Set the ssl context callback function, currently only for OpenSSL or
- WolfSSL ssl_ctx, or mbedTLS mbedtls_ssl_config in the second argument.
+ wolfSSL ssl_ctx, or mbedTLS mbedtls_ssl_config in the second argument.
The function must match the curl_ssl_ctx_callback prototype. */
CURLOPT(CURLOPT_SSL_CTX_FUNCTION, CURLOPTTYPE_FUNCTIONPOINT, 108),
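
What the void pointer passed to this callback actually is depends on the TLS backend, as the comment above says. A sketch assuming an OpenSSL build, where it is an SSL_CTX*; the callback name and the verify tweak are purely illustrative:

#include <curl/curl.h>
#include <openssl/ssl.h>

static CURLcode my_ssl_ctx_cb(CURL *curl, void *ssl_ctx, void *userptr)
{
  SSL_CTX *ctx = (SSL_CTX *)ssl_ctx; /* wolfSSL/mbedTLS builds pass other types */
  (void)curl; (void)userptr;
  SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL); /* example adjustment */
  return CURLE_OK;
}

/* curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, my_ssl_ctx_cb); */
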
@@ -1506,7 +1514,7 @@ typedef enum {
CURLOPT(CURLOPT_PROXYAUTH, CURLOPTTYPE_VALUES, 111),
/* Option that changes the timeout, in seconds, associated with getting a
- response. This is different from transfer timeout time and essentially
+ response. This is different from transfer timeout time and essentially
places a demand on the server to acknowledge commands in a timely
manner. For FTP, SMTP, IMAP and POP3. */
CURLOPT(CURLOPT_SERVER_RESPONSE_TIMEOUT, CURLOPTTYPE_LONG, 112),
@@ -1520,7 +1528,7 @@ typedef enum {
an HTTP or FTP server.
Note there is also _LARGE version which adds large file support for
- platforms which have larger off_t sizes. See MAXFILESIZE_LARGE below. */
+ platforms which have larger off_t sizes. See MAXFILESIZE_LARGE below. */
CURLOPT(CURLOPT_MAXFILESIZE, CURLOPTTYPE_LONG, 114),
/* See the comment for INFILESIZE above, but in short, specifies
@@ -1528,17 +1536,17 @@ typedef enum {
*/
CURLOPT(CURLOPT_INFILESIZE_LARGE, CURLOPTTYPE_OFF_T, 115),
- /* Sets the continuation offset. There is also a CURLOPTTYPE_LONG version
+ /* Sets the continuation offset. There is also a CURLOPTTYPE_LONG version
* of this; look above for RESUME_FROM.
*/
CURLOPT(CURLOPT_RESUME_FROM_LARGE, CURLOPTTYPE_OFF_T, 116),
/* Sets the maximum size of data that will be downloaded from
- * an HTTP or FTP server. See MAXFILESIZE above for the LONG version.
+ * an HTTP or FTP server. See MAXFILESIZE above for the LONG version.
*/
CURLOPT(CURLOPT_MAXFILESIZE_LARGE, CURLOPTTYPE_OFF_T, 117),
- /* Set this option to the file name of your .netrc file you want libcurl
+ /* Set this option to the filename of your .netrc file you want libcurl
to parse (using the CURLOPT_NETRC option). If not set, libcurl will do
a poor attempt to find the user's home directory and check for a .netrc
file in there. */
@@ -1685,7 +1693,7 @@ typedef enum {
/* Callback function for opening socket (instead of socket(2)). Optionally,
callback is able change the address or refuse to connect returning
- CURL_SOCKET_BAD. The callback should have type
+ CURL_SOCKET_BAD. The callback should have type
curl_opensocket_callback */
CURLOPT(CURLOPT_OPENSOCKETFUNCTION, CURLOPTTYPE_FUNCTIONPOINT, 163),
CURLOPT(CURLOPT_OPENSOCKETDATA, CURLOPTTYPE_CBPOINT, 164),
@@ -1755,7 +1763,7 @@ typedef enum {
CURLOPTDEPRECATED(CURLOPT_REDIR_PROTOCOLS, CURLOPTTYPE_LONG, 182,
7.85.0, "Use CURLOPT_REDIR_PROTOCOLS_STR"),
- /* set the SSH knownhost file name to use */
+ /* set the SSH knownhost filename to use */
CURLOPT(CURLOPT_SSH_KNOWNHOSTS, CURLOPTTYPE_STRINGPOINT, 183),
/* set the SSH host key callback, must point to a curl_sshkeycallback
@@ -1836,7 +1844,7 @@ typedef enum {
future libcurl release.
libcurl will ask for the compressed methods it knows of, and if that
- isn't any, it will not ask for transfer-encoding at all even if this
+ is not any, it will not ask for transfer-encoding at all even if this
option is set to 1.
*/
@@ -1938,7 +1946,7 @@ typedef enum {
/* Service Name */
CURLOPT(CURLOPT_SERVICE_NAME, CURLOPTTYPE_STRINGPOINT, 236),
- /* Wait/don't wait for pipe/mutex to clarify */
+ /* Wait/do not wait for pipe/mutex to clarify */
CURLOPT(CURLOPT_PIPEWAIT, CURLOPTTYPE_LONG, 237),
/* Set the protocol used when curl is given a URL without a protocol */
@@ -2099,7 +2107,7 @@ typedef enum {
/* alt-svc control bitmask */
CURLOPT(CURLOPT_ALTSVC_CTRL, CURLOPTTYPE_LONG, 286),
- /* alt-svc cache file name to possibly read from/write to */
+ /* alt-svc cache filename to possibly read from/write to */
CURLOPT(CURLOPT_ALTSVC, CURLOPTTYPE_STRINGPOINT, 287),
/* maximum age (idle time) of a connection to consider it for reuse
@@ -2131,7 +2139,7 @@ typedef enum {
/* HSTS bitmask */
CURLOPT(CURLOPT_HSTS_CTRL, CURLOPTTYPE_LONG, 299),
- /* HSTS file name */
+ /* HSTS filename */
CURLOPT(CURLOPT_HSTS, CURLOPTTYPE_STRINGPOINT, 300),
/* HSTS read callback */
@@ -2210,9 +2218,12 @@ typedef enum {
/* millisecond version */
CURLOPT(CURLOPT_SERVER_RESPONSE_TIMEOUT_MS, CURLOPTTYPE_LONG, 324),
- /* set ECH configuration */
+ /* set ECH configuration */
CURLOPT(CURLOPT_ECH, CURLOPTTYPE_STRINGPOINT, 325),
+ /* maximum number of keepalive probes (Linux, *BSD, macOS, etc.) */
+ CURLOPT(CURLOPT_TCP_KEEPCNT, CURLOPTTYPE_LONG, 326),
+
CURLOPT_LASTENTRY /* the last unused */
} CURLoption;
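
CURLOPT_TCP_KEEPCNT (option 326) is the one genuinely new option in this header. A minimal sketch of how it would sit next to the existing keepalive options; the URL and the probe values are illustrative:

#include <curl/curl.h>

int main(void)
{
  CURL *curl;
  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 60L);  /* idle seconds before probing */
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 30L); /* seconds between probes */
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPCNT, 4L);    /* max probes, new in 8.9.0 */
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return 0;
}
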
@@ -2263,9 +2274,9 @@ typedef enum {
/* These enums are for use with the CURLOPT_HTTP_VERSION option. */
enum {
- CURL_HTTP_VERSION_NONE, /* setting this means we don't care, and that we'd
- like the library to choose the best possible
- for us! */
+ CURL_HTTP_VERSION_NONE, /* setting this means we do not care, and that we
+ would like the library to choose the best
+ possible for us! */
CURL_HTTP_VERSION_1_0, /* please use HTTP 1.0 in the request */
CURL_HTTP_VERSION_1_1, /* please use HTTP 1.1 in the request */
CURL_HTTP_VERSION_2_0, /* please use HTTP 2 in the request */
@@ -2425,7 +2436,7 @@ CURL_EXTERN CURLcode curl_mime_name(curl_mimepart *part, const char *name);
*
* DESCRIPTION
*
- * Set mime part remote file name.
+ * Set mime part remote filename.
*/
CURL_EXTERN CURLcode curl_mime_filename(curl_mimepart *part,
const char *filename);
@@ -2706,10 +2717,10 @@ CURL_EXTERN CURLcode curl_global_init(long flags);
* DESCRIPTION
*
* curl_global_init() or curl_global_init_mem() should be invoked exactly once
- * for each application that uses libcurl. This function can be used to
+ * for each application that uses libcurl. This function can be used to
* initialize libcurl and set user defined memory management callback
- * functions. Users can implement memory management routines to check for
- * memory leaks, check for mis-use of the curl library etc. User registered
+ * functions. Users can implement memory management routines to check for
+ * memory leaks, check for mis-use of the curl library etc. User registered
* callback routines will be invoked by this library instead of the system
* memory management routines like malloc, free etc.
*/
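
A sketch of the curl_global_init_mem() usage described above: leak-counting wrappers around the system allocator. All five callbacks must be supplied; the counter is illustrative and not thread safe.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <curl/curl.h>

static long live_allocs;
static void *cnt_malloc(size_t size) { live_allocs++; return malloc(size); }
static void cnt_free(void *ptr) { if(ptr) live_allocs--; free(ptr); }
static void *cnt_realloc(void *ptr, size_t size) { return realloc(ptr, size); }
static char *cnt_strdup(const char *str) { live_allocs++; return strdup(str); }
static void *cnt_calloc(size_t n, size_t size) { live_allocs++; return calloc(n, size); }

int main(void)
{
  curl_global_init_mem(CURL_GLOBAL_DEFAULT, cnt_malloc, cnt_free,
                       cnt_realloc, cnt_strdup, cnt_calloc);
  /* ... run transfers ... */
  curl_global_cleanup();
  printf("allocations still live: %ld\n", live_allocs);
  return 0;
}
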
@@ -2827,7 +2838,7 @@ CURL_EXTERN time_t curl_getdate(const char *p, const time_t *unused);
for with CURLOPT_CERTINFO / CURLINFO_CERTINFO */
struct curl_certinfo {
int num_of_certs; /* number of certificates with information */
- struct curl_slist **certinfo; /* for each index in this array, there's a
+ struct curl_slist **certinfo; /* for each index in this array, there is a
linked list with textual information for a
certificate in the format "name:content".
eg "Subject:foo", "Issuer:bar", etc. */
@@ -3018,7 +3029,7 @@ typedef enum {
} CURLSHcode;
typedef enum {
- CURLSHOPT_NONE, /* don't use */
+ CURLSHOPT_NONE, /* do not use */
CURLSHOPT_SHARE, /* specify a data type to share */
CURLSHOPT_UNSHARE, /* specify which data type to stop sharing */
CURLSHOPT_LOCKFUNC, /* pass in a 'curl_lock_function' pointer */
@@ -3177,7 +3188,7 @@ CURL_EXTERN curl_version_info_data *curl_version_info(CURLversion);
* DESCRIPTION
*
* The curl_easy_strerror function may be used to turn a CURLcode value
- * into the equivalent human readable error string. This is useful
+ * into the equivalent human readable error string. This is useful
* for printing meaningful error messages.
*/
CURL_EXTERN const char *curl_easy_strerror(CURLcode);
@@ -3188,7 +3199,7 @@ CURL_EXTERN const char *curl_easy_strerror(CURLcode);
* DESCRIPTION
*
* The curl_share_strerror function may be used to turn a CURLSHcode value
- * into the equivalent human readable error string. This is useful
+ * into the equivalent human readable error string. This is useful
* for printing meaningful error messages.
*/
CURL_EXTERN const char *curl_share_strerror(CURLSHcode);
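
The strerror helpers documented above turn the enum results into printable text. A minimal usage sketch; the URL is illustrative:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    CURLcode rc;
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    rc = curl_easy_perform(curl);
    if(rc != CURLE_OK)
      fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(rc));
    curl_easy_cleanup(curl);
  }
  return 0;
}
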
@@ -3227,7 +3238,7 @@ CURL_EXTERN CURLcode curl_easy_pause(CURL *handle, int bitmask);
#include "websockets.h"
#include "mprintf.h"
-/* the typechecker doesn't work in C++ (yet) */
+/* the typechecker does not work in C++ (yet) */
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && \
((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && \
!defined(__cplusplus) && !defined(CURL_DISABLE_TYPECHECK)
diff --git a/libs/libcurl/include/curl/curlver.h b/libs/libcurl/include/curl/curlver.h
index 7faa482afc..5cdafed8a1 100644
--- a/libs/libcurl/include/curl/curlver.h
+++ b/libs/libcurl/include/curl/curlver.h
@@ -32,12 +32,12 @@
/* This is the version number of the libcurl package from which this header
file origins: */
-#define LIBCURL_VERSION "8.8.0"
+#define LIBCURL_VERSION "8.9.0"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 8
-#define LIBCURL_VERSION_MINOR 8
+#define LIBCURL_VERSION_MINOR 9
#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
@@ -48,7 +48,7 @@
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal (using 8 bits each). All three numbers are always represented
- using two digits. 1.2 would appear as "0x010200" while version 9.11.7
+ using two digits. 1.2 would appear as "0x010200" while version 9.11.7
appears as "0x090b07".
This 6-digit (24 bits) hexadecimal number does not show pre-release number,
@@ -59,7 +59,7 @@
CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
-#define LIBCURL_VERSION_NUM 0x080800
+#define LIBCURL_VERSION_NUM 0x080900
/*
* This is the date and time when the full source package was created. The
@@ -70,7 +70,7 @@
*
* "2007-11-23"
*/
-#define LIBCURL_TIMESTAMP "2024-05-22"
+#define LIBCURL_TIMESTAMP "2024-07-24"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|(z))
#define CURL_AT_LEAST_VERSION(x,y,z) \
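
The bumped LIBCURL_VERSION_NUM (0x080900) is what compile-time gating keys off. A sketch of guarding a new option on the header version and reporting the runtime library version; the macro name HAVE_TCP_KEEPCNT_OPT is my own:

#include <stdio.h>
#include <curl/curl.h>

#if CURL_AT_LEAST_VERSION(8, 9, 0)
#define HAVE_TCP_KEEPCNT_OPT 1  /* headers are new enough for option 326 */
#endif

int main(void)
{
  curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
  printf("built against %s, running %s\n", LIBCURL_VERSION, info->version);
  return 0;
}
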
diff --git a/libs/libcurl/include/curl/easy.h b/libs/libcurl/include/curl/easy.h
index e9c46579f0..f21588d820 100644
--- a/libs/libcurl/include/curl/easy.h
+++ b/libs/libcurl/include/curl/easy.h
@@ -50,7 +50,7 @@ CURL_EXTERN void curl_easy_cleanup(CURL *curl);
*
* Request internal information from the curl session with this function.
* The third argument MUST be pointing to the specific type of the used option
- * which is documented in each man page of the option. The data pointed to
+ * which is documented in each manpage of the option. The data pointed to
* will be filled in accordingly and can be relied upon only if the function
* returns CURLE_OK. This function is intended to get used *AFTER* a performed
* transfer, all results from this function are undefined until the transfer
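
As the comment stresses, curl_easy_getinfo() needs the exact pointer type each CURLINFO_* documents, and results are only meaningful after the transfer. A short sketch; the URL is illustrative:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    if(curl_easy_perform(curl) == CURLE_OK) {
      long code = 0;       /* CURLINFO_RESPONSE_CODE wants a long* */
      char *ctype = NULL;  /* CURLINFO_CONTENT_TYPE wants a char** */
      curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
      curl_easy_getinfo(curl, CURLINFO_CONTENT_TYPE, &ctype);
      printf("HTTP %ld, Content-Type: %s\n", code, ctype ? ctype : "(none)");
    }
    curl_easy_cleanup(curl);
  }
  return 0;
}
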
diff --git a/libs/libcurl/include/curl/mprintf.h b/libs/libcurl/include/curl/mprintf.h
index 7880024983..fe27d98c1d 100644
--- a/libs/libcurl/include/curl/mprintf.h
+++ b/libs/libcurl/include/curl/mprintf.h
@@ -32,7 +32,8 @@
extern "C" {
#endif
-#if (defined(__GNUC__) || defined(__clang__)) && \
+#if (defined(__GNUC__) || defined(__clang__) || \
+ defined(__IAR_SYSTEMS_ICC__)) && \
defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
!defined(CURL_NO_FMT_CHECKS)
#if defined(__MINGW32__) && !defined(__clang__)
diff --git a/libs/libcurl/include/curl/multi.h b/libs/libcurl/include/curl/multi.h
index 9a808f1f99..0041c9158a 100644
--- a/libs/libcurl/include/curl/multi.h
+++ b/libs/libcurl/include/curl/multi.h
@@ -24,7 +24,7 @@
*
***************************************************************************/
/*
- This is an "external" header file. Don't give away any internals here!
+ This is an "external" header file. Do not give away any internals here!
GOALS
@@ -66,7 +66,7 @@ typedef enum {
CURLM_OK,
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
- CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
+ CURLM_OUT_OF_MEMORY, /* if you ever get this, you are in deep sh*t */
CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_BAD_SOCKET, /* the passed in socket argument did not match */
CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */
@@ -109,7 +109,7 @@ struct CURLMsg {
typedef struct CURLMsg CURLMsg;
/* Based on poll(2) structure and values.
- * We don't use pollfd and POLL* constants explicitly
+ * We do not use pollfd and POLL* constants explicitly
* to cover platforms without poll(). */
#define CURL_WAIT_POLLIN 0x0001
#define CURL_WAIT_POLLPRI 0x0002
@@ -205,7 +205,7 @@ CURL_EXTERN CURLMcode curl_multi_wakeup(CURLM *multi_handle);
/*
* Name: curl_multi_perform()
*
- * Desc: When the app thinks there's data available for curl it calls this
+ * Desc: When the app thinks there is data available for curl it calls this
* function to read/write whatever there is right now. This returns
* as soon as the reads and writes are done. This function does not
* require that there actually is data available for reading or that
@@ -236,7 +236,7 @@ CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
/*
* Name: curl_multi_info_read()
*
- * Desc: Ask the multi handle if there's any messages/informationals from
+ * Desc: Ask the multi handle if there is any messages/informationals from
* the individual transfers. Messages include informationals such as
* error code from the transfer or just the fact that a transfer is
* completed. More details on these should be written down as well.
@@ -253,7 +253,7 @@ CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
* we will provide the particular "transfer handle" in that struct
* and that should/could/would be used in subsequent
* curl_easy_getinfo() calls (or similar). The point being that we
- * must never expose complex structs to applications, as then we'll
+ * must never expose complex structs to applications, as then we will
* undoubtably get backwards compatibility problems in the future.
*
* Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
@@ -268,7 +268,7 @@ CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
* Name: curl_multi_strerror()
*
* Desc: The curl_multi_strerror function may be used to turn a CURLMcode
- * value into the equivalent human readable error string. This is
+ * value into the equivalent human readable error string. This is
* useful for printing meaningful error messages.
*
* Returns: A pointer to a null-terminated error message.
@@ -282,7 +282,7 @@ CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
* Desc: An alternative version of curl_multi_perform() that allows the
* application to pass in one of the file descriptors that have been
* detected to have "action" on them and let libcurl perform.
- * See man page for details.
+ * See manpage for details.
*/
#define CURL_POLL_NONE 0
#define CURL_POLL_IN 1
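
A compact sketch of the curl_multi_perform()/curl_multi_info_read() cycle the comments above describe, for a single easy handle; error handling is abbreviated and the URL is illustrative:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLM *multi = curl_multi_init();
  CURL *easy = curl_easy_init();
  CURLMsg *m;
  int running = 0, queued = 0;

  curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
  curl_multi_add_handle(multi, easy);

  do {
    CURLMcode mc = curl_multi_perform(multi, &running);
    if(!mc && running)
      mc = curl_multi_poll(multi, NULL, 0, 1000, NULL); /* wait for activity */
    if(mc) {
      fprintf(stderr, "multi: %s\n", curl_multi_strerror(mc));
      break;
    }
  } while(running);

  while((m = curl_multi_info_read(multi, &queued))) {
    if(m->msg == CURLMSG_DONE)
      printf("done: %s\n", curl_easy_strerror(m->data.result));
  }

  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
  return 0;
}
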
diff --git a/libs/libcurl/include/curl/system.h b/libs/libcurl/include/curl/system.h
index 01ff338b15..5285aa0043 100644
--- a/libs/libcurl/include/curl/system.h
+++ b/libs/libcurl/include/curl/system.h
@@ -46,7 +46,7 @@
* As a general rule, curl_off_t shall not be mapped to off_t. This rule shall
* only be violated if off_t is the only 64-bit data type available and the
* size of off_t is independent of large file support settings. Keep your
- * build on the safe side avoiding an off_t gating. If you have a 64-bit
+ * build on the safe side avoiding an off_t gating. If you have a 64-bit
* off_t then take for sure that another 64-bit data type exists, dig deeper
* and you will find it.
*
@@ -402,7 +402,7 @@
# define CURL_PULL_SYS_SOCKET_H 1
#else
-/* generic "safe guess" on old 32 bit style */
+/* generic "safe guess" on old 32-bit style */
# define CURL_TYPEOF_CURL_OFF_T long
# define CURL_FORMAT_CURL_OFF_T "ld"
# define CURL_FORMAT_CURL_OFF_TU "lu"
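
The CURL_FORMAT_CURL_OFF_T macros chosen above are intended for application printf() calls, so code does not hard-code %ld versus %lld. A small sketch:

#include <stdio.h>
#include <curl/curl.h>  /* pulls in curl/system.h */

int main(void)
{
  curl_off_t resume_from = (curl_off_t)5 * 1024 * 1024 * 1024; /* 5 GiB */
  printf("resume offset: %" CURL_FORMAT_CURL_OFF_T " bytes\n", resume_from);
  return 0;
}
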
diff --git a/libs/libcurl/include/curl/typecheck-gcc.h b/libs/libcurl/include/curl/typecheck-gcc.h
index 8fd0d91e60..909fc6b7ff 100644
--- a/libs/libcurl/include/curl/typecheck-gcc.h
+++ b/libs/libcurl/include/curl/typecheck-gcc.h
@@ -34,11 +34,11 @@
* _curl_easy_setopt_err_sometype below
*
* NOTE: We use two nested 'if' statements here instead of the && operator, in
- * order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x
+ * order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x
* when compiling with -Wlogical-op.
*
- * To add an option that uses the same type as an existing option, you'll just
- * need to extend the appropriate _curl_*_option macro
+ * To add an option that uses the same type as an existing option, you will
+ * just need to extend the appropriate _curl_*_option macro
*/
#define curl_easy_setopt(handle, option, value) \
__extension__({ \
@@ -245,7 +245,7 @@ CURLWARNING(_curl_easy_getinfo_err_curl_off_t,
/* To add a new option to one of the groups, just add
* (option) == CURLOPT_SOMETHING
- * to the or-expression. If the option takes a long or curl_off_t, you don't
+ * to the or-expression. If the option takes a long or curl_off_t, you do not
* have to do anything
*/
@@ -678,7 +678,7 @@ typedef CURLcode (*_curl_ssl_ctx_callback4)(CURL *, const void *,
const void *);
#ifdef HEADER_SSL_H
/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX
- * this will of course break if we're included before OpenSSL headers...
+ * this will of course break if we are included before OpenSSL headers...
*/
typedef CURLcode (*_curl_ssl_ctx_callback5)(CURL *, SSL_CTX *, void *);
typedef CURLcode (*_curl_ssl_ctx_callback6)(CURL *, SSL_CTX *, const void *);
diff --git a/libs/libcurl/include/curl/urlapi.h b/libs/libcurl/include/curl/urlapi.h
index b65735cf62..46e79114e1 100644
--- a/libs/libcurl/include/curl/urlapi.h
+++ b/libs/libcurl/include/curl/urlapi.h
@@ -97,11 +97,12 @@ typedef enum {
#define CURLU_NO_AUTHORITY (1<<10) /* Allow empty authority when the
scheme is unknown. */
#define CURLU_ALLOW_SPACE (1<<11) /* Allow spaces in the URL */
-#define CURLU_PUNYCODE (1<<12) /* get the host name in punycode */
+#define CURLU_PUNYCODE (1<<12) /* get the hostname in punycode */
#define CURLU_PUNY2IDN (1<<13) /* punycode => IDN conversion */
#define CURLU_GET_EMPTY (1<<14) /* allow empty queries and fragments
when extracting the URL or the
components */
+#define CURLU_NO_GUESS_SCHEME (1<<15) /* for get, do not accept a guess */
typedef struct Curl_URL CURLU;
@@ -142,7 +143,7 @@ CURL_EXTERN CURLUcode curl_url_set(CURLU *handle, CURLUPart what,
/*
* curl_url_strerror() turns a CURLUcode value into the equivalent human
- * readable error string. This is useful for printing meaningful error
+ * readable error string. This is useful for printing meaningful error
* messages.
*/
CURL_EXTERN const char *curl_url_strerror(CURLUcode);
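
A short sketch of the URL API these flags feed into; curl_url_strerror() is the helper documented just above and the URL is illustrative. Passing CURLU_PUNYCODE to the get call would return an IDN host in its xn-- form.

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLU *u = curl_url();
  CURLUcode uc;
  char *host = NULL;
  if(!u)
    return 1;
  uc = curl_url_set(u, CURLUPART_URL, "https://example.com/index.html", 0);
  if(!uc)
    uc = curl_url_get(u, CURLUPART_HOST, &host, 0); /* or CURLU_PUNYCODE */
  if(uc)
    fprintf(stderr, "URL error: %s\n", curl_url_strerror(uc));
  else {
    printf("host: %s\n", host);
    curl_free(host);
  }
  curl_url_cleanup(u);
  return 0;
}
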
diff --git a/libs/libcurl/src/CMakeLists.txt b/libs/libcurl/src/CMakeLists.txt
index 47392ca79e..e0d8bcfd7c 100644
--- a/libs/libcurl/src/CMakeLists.txt
+++ b/libs/libcurl/src/CMakeLists.txt
@@ -65,29 +65,6 @@ if(ENABLE_CURLDEBUG)
set_source_files_properties(memdebug.c curl_multibyte.c PROPERTIES SKIP_UNITY_BUILD_INCLUSION ON)
endif()
-transform_makefile_inc("Makefile.soname" "${CMAKE_CURRENT_BINARY_DIR}/Makefile.soname.cmake")
-include(${CMAKE_CURRENT_BINARY_DIR}/Makefile.soname.cmake)
-
-if(CMAKE_SYSTEM_NAME STREQUAL "AIX" OR
- CMAKE_SYSTEM_NAME STREQUAL "Linux" OR
- CMAKE_SYSTEM_NAME STREQUAL "Darwin" OR
- CMAKE_SYSTEM_NAME STREQUAL "SunOS" OR
- CMAKE_SYSTEM_NAME STREQUAL "GNU/kFreeBSD" OR
-
- # FreeBSD comes with the a.out and elf flavours
- # but a.out was supported up to version 3.x and
- # elf from 3.x. I cannot imagine someone running
- # CMake on those ancient systems
- CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR
-
- CMAKE_SYSTEM_NAME STREQUAL "Haiku")
-
- math(EXPR CMAKESONAME "${VERSIONCHANGE} - ${VERSIONDEL}")
- set(CMAKEVERSION "${CMAKESONAME}.${VERSIONDEL}.${VERSIONADD}")
-else()
- unset(CMAKESONAME)
-endif()
-
## Library definition
# Add "_imp" as a suffix before the extension to avoid conflicting with
@@ -169,10 +146,6 @@ if(BUILD_STATIC_LIBS)
INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE
INTERPROCEDURAL_OPTIMIZATION_RELWITHDEBINFO TRUE)
endif()
- if(CMAKEVERSION AND CMAKESONAME)
- set_target_properties(${LIB_STATIC} PROPERTIES
- VERSION ${CMAKEVERSION} SOVERSION ${CMAKESONAME})
- endif()
target_include_directories(${LIB_STATIC} INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
@@ -185,8 +158,8 @@ if(BUILD_SHARED_LIBS)
add_library(${PROJECT_NAME}::${LIB_SHARED} ALIAS ${LIB_SHARED})
if(WIN32 OR CYGWIN)
if(CYGWIN)
- # For cygwin always compile dllmain.c as a separate unit since it
- # includes windows.h, which shouldn't be included in other units.
+ # For Cygwin always compile dllmain.c as a separate unit since it
+ # includes windows.h, which should not be included in other units.
set_source_files_properties(dllmain.c PROPERTIES
SKIP_UNITY_BUILD_INCLUSION ON)
endif()
@@ -213,14 +186,40 @@ if(BUILD_SHARED_LIBS)
INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE
INTERPROCEDURAL_OPTIMIZATION_RELWITHDEBINFO TRUE)
endif()
- if(CMAKEVERSION AND CMAKESONAME)
- set_target_properties(${LIB_SHARED} PROPERTIES
- VERSION ${CMAKEVERSION} SOVERSION ${CMAKESONAME})
- endif()
target_include_directories(${LIB_SHARED} INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
$<BUILD_INTERFACE:${CURL_SOURCE_DIR}/include>)
+
+ if(CMAKE_DLL_NAME_WITH_SOVERSION OR
+ CYGWIN OR
+ APPLE OR
+ CMAKE_SYSTEM_NAME STREQUAL "AIX" OR
+ CMAKE_SYSTEM_NAME STREQUAL "Linux" OR
+ CMAKE_SYSTEM_NAME STREQUAL "SunOS" OR
+ CMAKE_SYSTEM_NAME STREQUAL "Haiku" OR
+ CMAKE_SYSTEM_NAME STREQUAL "GNU/kFreeBSD" OR
+ # FreeBSD comes with the a.out and ELF flavours but a.out was supported
+ # up to v3.x and ELF from v3.x. I cannot imagine someone running CMake
+ # on those ancient systems.
+ CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
+ set(soversion_default TRUE)
+ else()
+ set(soversion_default FALSE)
+ endif()
+
+ option(CURL_LIBCURL_SOVERSION "Enable libcurl SOVERSION" ${soversion_default})
+
+ if(CURL_LIBCURL_SOVERSION)
+ transform_makefile_inc("Makefile.soname" "${CMAKE_CURRENT_BINARY_DIR}/Makefile.soname.cmake")
+ include(${CMAKE_CURRENT_BINARY_DIR}/Makefile.soname.cmake)
+
+ math(EXPR CMAKESONAME "${VERSIONCHANGE} - ${VERSIONDEL}")
+ set(CMAKEVERSION "${CMAKESONAME}.${VERSIONDEL}.${VERSIONADD}")
+
+ set_target_properties(${LIB_SHARED} PROPERTIES
+ VERSION "${CMAKEVERSION}" SOVERSION "${CMAKESONAME}")
+ endif()
endif()
add_library(${LIB_NAME} ALIAS ${LIB_SELECTED})
diff --git a/libs/libcurl/src/Makefile.am b/libs/libcurl/src/Makefile.am
index 4fe1ef7f74..7f615a0711 100644
--- a/libs/libcurl/src/Makefile.am
+++ b/libs/libcurl/src/Makefile.am
@@ -126,7 +126,7 @@ checksrc:
-W$(srcdir)/curl_config.h $(srcdir)/*.[ch] $(srcdir)/vauth/*.[ch] \
$(srcdir)/vtls/*.[ch] $(srcdir)/vquic/*.[ch] $(srcdir)/vssh/*.[ch])
-if CURLDEBUG
+if DEBUGBUILD
# for debug builds, we scan the sources on all regular make invokes
all-local: checksrc
endif
diff --git a/libs/libcurl/src/Makefile.in b/libs/libcurl/src/Makefile.in
index 4f0c9d1651..3bc0887503 100644
--- a/libs/libcurl/src/Makefile.in
+++ b/libs/libcurl/src/Makefile.in
@@ -168,7 +168,6 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/curl-amissl.m4 \
$(top_srcdir)/m4/xc-am-iface.m4 \
$(top_srcdir)/m4/xc-cc-check.m4 \
$(top_srcdir)/m4/xc-lt-iface.m4 \
- $(top_srcdir)/m4/xc-translit.m4 \
$(top_srcdir)/m4/xc-val-flgs.m4 \
$(top_srcdir)/m4/zz40-xc-ovr.m4 \
$(top_srcdir)/m4/zz50-xc-ovr.m4 \
@@ -966,6 +965,8 @@ LD = @LD@
LDFLAGS = @LDFLAGS@
LIBCURL_LIBS = @LIBCURL_LIBS@
LIBCURL_NO_SHARED = @LIBCURL_NO_SHARED@
+LIBCURL_PC_REQUIRES = @LIBCURL_PC_REQUIRES@
+LIBCURL_PC_REQUIRES_PRIVATE = @LIBCURL_PC_REQUIRES_PRIVATE@
LIBOBJS = @LIBOBJS@
# Prevent LIBS from being used for all link targets
@@ -4674,7 +4675,7 @@ distdir-am: $(DISTFILES)
done
check-am: all-am
check: check-am
-@CURLDEBUG_FALSE@all-local:
+@DEBUGBUILD_FALSE@all-local:
all-am: Makefile $(LTLIBRARIES) curl_config.h all-local
installdirs:
for dir in "$(DESTDIR)$(libdir)"; do \
@@ -5507,7 +5508,7 @@ checksrc:
$(srcdir)/vtls/*.[ch] $(srcdir)/vquic/*.[ch] $(srcdir)/vssh/*.[ch])
# for debug builds, we scan the sources on all regular make invokes
-@CURLDEBUG_TRUE@all-local: checksrc
+@DEBUGBUILD_TRUE@all-local: checksrc
tidy:
$(TIDY) $(CSOURCES) $(TIDYFLAGS) -- $(AM_CPPFLAGS) $(CPPFLAGS) -DHAVE_CONFIG_H
diff --git a/libs/libcurl/src/altsvc.c b/libs/libcurl/src/altsvc.c
index 590c580087..f8d3516952 100644
--- a/libs/libcurl/src/altsvc.c
+++ b/libs/libcurl/src/altsvc.c
@@ -211,7 +211,7 @@ static CURLcode altsvc_load(struct altsvcinfo *asi, const char *file)
CURLcode result = CURLE_OK;
FILE *fp;
- /* we need a private copy of the file name so that the altsvc cache file
+ /* we need a private copy of the filename so that the altsvc cache file
name survives an easy handle reset */
free(asi->filename);
asi->filename = strdup(file);
@@ -270,7 +270,7 @@ static CURLcode altsvc_out(struct altsvc *as, FILE *fp)
"%s %s%s%s %u "
"\"%d%02d%02d "
"%02d:%02d:%02d\" "
- "%u %d\n",
+ "%u %u\n",
Curl_alpnid2str(as->src.alpnid),
src6_pre, as->src.host, src6_post,
as->src.port,
@@ -373,7 +373,7 @@ CURLcode Curl_altsvc_save(struct Curl_easy *data,
file = altsvc->filename;
if((altsvc->flags & CURLALTSVC_READONLYFILE) || !file || !file[0])
- /* marked as read-only, no file or zero length file name */
+ /* marked as read-only, no file or zero length filename */
return CURLE_OK;
result = Curl_fopen(data, file, &out, &tempstore);
@@ -430,7 +430,7 @@ static bool hostcompare(const char *host, const char *check)
if(hlen && (host[hlen - 1] == '.'))
hlen--;
if(hlen != clen)
- /* they can't match if they have different lengths */
+ /* they cannot match if they have different lengths */
return FALSE;
return strncasecompare(host, check, hlen);
}
@@ -462,7 +462,7 @@ static time_t altsvc_debugtime(void *unused)
char *timestr = getenv("CURL_TIME");
(void)unused;
if(timestr) {
- unsigned long val = strtol(timestr, NULL, 10);
+ long val = strtol(timestr, NULL, 10);
return (time_t)val;
}
return time(NULL);
@@ -477,11 +477,11 @@ static time_t altsvc_debugtime(void *unused)
* Curl_altsvc_parse() takes an incoming alt-svc response header and stores
* the data correctly in the cache.
*
- * 'value' points to the header *value*. That's contents to the right of the
+ * 'value' points to the header *value*. That is contents to the right of the
* header name.
*
* Currently this function rejects invalid data without returning an error.
- * Invalid host name, port number will result in the specific alternative
+ * Invalid hostname, port number will result in the specific alternative
* being rejected. Unknown protocols are skipped.
*/
CURLcode Curl_altsvc_parse(struct Curl_easy *data,
@@ -531,7 +531,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
bool valid = TRUE;
p++;
if(*p != ':') {
- /* host name starts here */
+ /* hostname starts here */
const char *hostp = p;
if(*p == '[') {
/* pass all valid IPv6 letters - does not handle zone id */
@@ -549,7 +549,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
len = p - hostp;
}
if(!len || (len >= MAX_ALTSVC_HOSTLEN)) {
- infof(data, "Excessive alt-svc host name, ignoring.");
+ infof(data, "Excessive alt-svc hostname, ignoring.");
valid = FALSE;
}
else {
@@ -624,7 +624,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
num = strtoul(value_ptr, &end_ptr, 10);
if((end_ptr != value_ptr) && (num < ULONG_MAX)) {
if(strcasecompare("ma", option))
- maxage = num;
+ maxage = (time_t)num;
else if(strcasecompare("persist", option) && (num == 1))
persist = TRUE;
}
@@ -651,7 +651,7 @@ CURLcode Curl_altsvc_parse(struct Curl_easy *data,
}
else
break;
- /* after the double quote there can be a comma if there's another
+ /* after the double quote there can be a comma if there is another
string or a semicolon if no more */
if(*p == ',') {
/* comma means another alternative is presented */
@@ -696,7 +696,7 @@ bool Curl_altsvc_lookup(struct altsvcinfo *asi,
if((as->src.alpnid == srcalpnid) &&
hostcompare(srchost, as->src.host) &&
(as->src.port == srcport) &&
- (versions & as->dst.alpnid)) {
+ (versions & (int)as->dst.alpnid)) {
/* match */
*dstentry = as;
return TRUE;
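
From the application side, the cache file that altsvc.c loads and saves is enabled with two options. A minimal sketch; the cache filename and URL are illustrative, and CURLALTSVC_H3 would additionally need an HTTP/3-enabled build.

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_ALTSVC, "altsvc-cache.txt");
    curl_easy_setopt(curl, CURLOPT_ALTSVC_CTRL,
                     (long)(CURLALTSVC_H1 | CURLALTSVC_H2));
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
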
diff --git a/libs/libcurl/src/altsvc.h b/libs/libcurl/src/altsvc.h
index 28fe1ce7f6..c3d2e8da21 100644
--- a/libs/libcurl/src/altsvc.h
+++ b/libs/libcurl/src/altsvc.h
@@ -47,7 +47,7 @@ struct altsvc {
struct althost dst;
time_t expires;
bool persist;
- int prio;
+ unsigned int prio;
struct Curl_llist_element node;
};
diff --git a/libs/libcurl/src/amigaos.c b/libs/libcurl/src/amigaos.c
index 1448def49a..9951d07312 100644
--- a/libs/libcurl/src/amigaos.c
+++ b/libs/libcurl/src/amigaos.c
@@ -117,7 +117,7 @@ void Curl_amiga_cleanup(void)
#ifdef CURLRES_AMIGA
/*
- * Because we need to handle the different cases in hostip4.c at run-time,
+ * Because we need to handle the different cases in hostip4.c at runtime,
* not at compile-time, based on what was detected in Curl_amiga_init(),
* we replace it completely with our own as to not complicate the baseline
* code. Assumes malloc/calloc/free are thread safe because Curl_he2ai()
diff --git a/libs/libcurl/src/arpa_telnet.h b/libs/libcurl/src/arpa_telnet.h
index 85f739f414..536b6a6de9 100644
--- a/libs/libcurl/src/arpa_telnet.h
+++ b/libs/libcurl/src/arpa_telnet.h
@@ -77,7 +77,7 @@ static const char * const telnetoptions[]=
#define CURL_GA 249 /* Go Ahead, reverse the line */
#define CURL_SB 250 /* SuBnegotiation */
#define CURL_WILL 251 /* Our side WILL use this option */
-#define CURL_WONT 252 /* Our side WON'T use this option */
+#define CURL_WONT 252 /* Our side will not use this option */
#define CURL_DO 253 /* DO use this option! */
#define CURL_DONT 254 /* DON'T use this option! */
#define CURL_IAC 255 /* Interpret As Command */
diff --git a/libs/libcurl/src/asyn-ares.c b/libs/libcurl/src/asyn-ares.c
index 1504377335..77b0a1b90a 100644
--- a/libs/libcurl/src/asyn-ares.c
+++ b/libs/libcurl/src/asyn-ares.c
@@ -65,7 +65,7 @@
# define CARES_STATICLIB
#endif
#include <ares.h>
-#include <ares_version.h> /* really old c-ares didn't include this by
+#include <ares_version.h> /* really old c-ares did not include this by
itself */
#if ARES_VERSION >= 0x010500
@@ -112,8 +112,8 @@ struct thread_data {
/* How long we are willing to wait for additional parallel responses after
obtaining a "definitive" one. For old c-ares without getaddrinfo.
- This is intended to equal the c-ares default timeout. cURL always uses that
- default value. Unfortunately, c-ares doesn't expose its default timeout in
+ This is intended to equal the c-ares default timeout. cURL always uses that
+ default value. Unfortunately, c-ares does not expose its default timeout in
its API, but it is officially documented as 5 seconds.
See query_completed_cb() for an explanation of how this is used.
@@ -126,8 +126,8 @@ static int ares_ver = 0;
/*
* Curl_resolver_global_init() - the generic low-level asynchronous name
- * resolve API. Called from curl_global_init() to initialize global resolver
- * environment. Initializes ares library.
+ * resolve API. Called from curl_global_init() to initialize global resolver
+ * environment. Initializes ares library.
*/
int Curl_resolver_global_init(void)
{
@@ -169,7 +169,7 @@ static void sock_state_cb(void *data, ares_socket_t socket_fd,
*
* Called from curl_easy_init() -> Curl_open() to initialize resolver
* URL-state specific environment ('resolver' member of the UrlState
- * structure). Fills the passed pointer by the initialized ares_channel.
+ * structure). Fills the passed pointer by the initialized ares_channel.
*/
CURLcode Curl_resolver_init(struct Curl_easy *easy, void **resolver)
{
@@ -211,7 +211,7 @@ CURLcode Curl_resolver_init(struct Curl_easy *easy, void **resolver)
*
* Called from curl_easy_cleanup() -> Curl_close() to cleanup resolver
* URL-state specific environment ('resolver' member of the UrlState
- * structure). Destroys the ares channel.
+ * structure). Destroys the ares channel.
*/
void Curl_resolver_cleanup(void *resolver)
{
@@ -222,7 +222,7 @@ void Curl_resolver_cleanup(void *resolver)
* Curl_resolver_duphandle()
*
* Called from curl_easy_duphandle() to duplicate resolver URL-state specific
- * environment ('resolver' member of the UrlState structure). Duplicates the
+ * environment ('resolver' member of the UrlState structure). Duplicates the
* 'from' ares channel and passes the resulting channel to the 'to' pointer.
*/
CURLcode Curl_resolver_duphandle(struct Curl_easy *easy, void **to, void *from)
@@ -250,12 +250,12 @@ void Curl_resolver_cancel(struct Curl_easy *data)
}
/*
- * We're equivalent to Curl_resolver_cancel() for the c-ares resolver. We
+ * We are equivalent to Curl_resolver_cancel() for the c-ares resolver. We
* never block.
*/
void Curl_resolver_kill(struct Curl_easy *data)
{
- /* We don't need to check the resolver state because we can be called safely
+ /* We do not need to check the resolver state because we can be called safely
at any time and we always do the same thing. */
Curl_resolver_cancel(data);
}
@@ -280,7 +280,7 @@ static void destroy_async_data(struct Curl_async *async)
/*
* Curl_resolver_getsock() is called when someone from the outside world
- * (using curl_multi_fdset()) wants to get our fd_set setup and we're talking
+ * (using curl_multi_fdset()) wants to get our fd_set setup and we are talking
* with ares. The caller must make sure that this function is only called when
* we have a working ares channel.
*
@@ -350,7 +350,7 @@ static int waitperform(struct Curl_easy *data, timediff_t timeout_ms)
}
if(num) {
- nfds = Curl_poll(pfd, num, timeout_ms);
+ nfds = Curl_poll(pfd, (unsigned int)num, timeout_ms);
if(nfds < 0)
return -1;
}
@@ -359,7 +359,7 @@ static int waitperform(struct Curl_easy *data, timediff_t timeout_ms)
if(!nfds)
/* Call ares_process() unconditionally here, even if we simply timed out
- above, as otherwise the ares name resolve won't timeout! */
+ above, as otherwise the ares name resolve will not timeout! */
ares_process_fd((ares_channel)data->state.async.resolver, ARES_SOCKET_BAD,
ARES_SOCKET_BAD);
else {
@@ -394,8 +394,8 @@ CURLcode Curl_resolver_is_resolved(struct Curl_easy *data,
return CURLE_UNRECOVERABLE_POLL;
#ifndef HAVE_CARES_GETADDRINFO
- /* Now that we've checked for any last minute results above, see if there are
- any responses still pending when the EXPIRE_HAPPY_EYEBALLS_DNS timer
+ /* Now that we have checked for any last minute results above, see if there
+ are any responses still pending when the EXPIRE_HAPPY_EYEBALLS_DNS timer
expires. */
if(res
&& res->num_pending
@@ -410,7 +410,7 @@ CURLcode Curl_resolver_is_resolved(struct Curl_easy *data,
&res->happy_eyeballs_dns_time, 0, sizeof(res->happy_eyeballs_dns_time));
/* Cancel the raw c-ares request, which will fire query_completed_cb() with
- ARES_ECANCELLED synchronously for all pending responses. This will
+ ARES_ECANCELLED synchronously for all pending responses. This will
leave us with res->num_pending == 0, which is perfect for the next
block. */
ares_cancel((ares_channel)data->state.async.resolver);
@@ -523,7 +523,7 @@ CURLcode Curl_resolver_wait_resolv(struct Curl_easy *data,
*entry = data->state.async.dns;
if(result)
- /* close the connection, since we can't return failure here without
+ /* close the connection, since we cannot return failure here without
cleaning up this connection properly. */
connclose(data->conn, "c-ares resolve failed");
@@ -603,57 +603,57 @@ static void query_completed_cb(void *arg, /* (struct connectdata *) */
/* If there are responses still pending, we presume they must be the
complementary IPv4 or IPv6 lookups that we started in parallel in
- Curl_resolver_getaddrinfo() (for Happy Eyeballs). If we've got a
+ Curl_resolver_getaddrinfo() (for Happy Eyeballs). If we have got a
"definitive" response from one of a set of parallel queries, we need to
- think about how long we're willing to wait for more responses. */
+ think about how long we are willing to wait for more responses. */
if(res->num_pending
/* Only these c-ares status values count as "definitive" for these
- purposes. For example, ARES_ENODATA is what we expect when there is
- no IPv6 entry for a domain name, and that's not a reason to get more
- aggressive in our timeouts for the other response. Other errors are
+ purposes. For example, ARES_ENODATA is what we expect when there is
+ no IPv6 entry for a domain name, and that is not a reason to get more
+ aggressive in our timeouts for the other response. Other errors are
either a result of bad input (which should affect all parallel
requests), local or network conditions, non-definitive server
responses, or us cancelling the request. */
&& (status == ARES_SUCCESS || status == ARES_ENOTFOUND)) {
- /* Right now, there can only be up to two parallel queries, so don't
+ /* Right now, there can only be up to two parallel queries, so do not
bother handling any other cases. */
DEBUGASSERT(res->num_pending == 1);
- /* It's possible that one of these parallel queries could succeed
- quickly, but the other could always fail or timeout (when we're
+ /* it is possible that one of these parallel queries could succeed
+ quickly, but the other could always fail or timeout (when we are
talking to a pool of DNS servers that can only successfully resolve
IPv4 address, for example).
- It's also possible that the other request could always just take
+ it is also possible that the other request could always just take
longer because it needs more time or only the second DNS server can
- fulfill it successfully. But, to align with the philosophy of Happy
- Eyeballs, we don't want to wait _too_ long or users will think
- requests are slow when IPv6 lookups don't actually work (but IPv4 ones
- do).
+ fulfill it successfully. But, to align with the philosophy of Happy
+ Eyeballs, we do not want to wait _too_ long or users will think
+ requests are slow when IPv6 lookups do not actually work (but IPv4
+ ones do).
So, now that we have a usable answer (some IPv4 addresses, some IPv6
addresses, or "no such domain"), we start a timeout for the remaining
- pending responses. Even though it is typical that this resolved
- request came back quickly, that needn't be the case. It might be that
- this completing request didn't get a result from the first DNS server
- or even the first round of the whole DNS server pool. So it could
- already be quite some time after we issued the DNS queries in the
- first place. Without modifying c-ares, we can't know exactly where in
- its retry cycle we are. We could guess based on how much time has
- gone by, but it doesn't really matter. Happy Eyeballs tells us that,
- given usable information in hand, we simply don't want to wait "too
- much longer" after we get a result.
+ pending responses. Even though it is typical that this resolved
+ request came back quickly, that needn't be the case. It might be that
+ this completing request did not get a result from the first DNS
+ server or even the first round of the whole DNS server pool. So it
+ could already be quite some time after we issued the DNS queries in
+ the first place. Without modifying c-ares, we cannot know exactly
+ where in its retry cycle we are. We could guess based on how much
+ time has gone by, but it does not really matter. Happy Eyeballs tells
+ us that, given usable information in hand, we simply do not want to
+ wait "too much longer" after we get a result.
We simply wait an additional amount of time equal to the default
- c-ares query timeout. That is enough time for a typical parallel
- response to arrive without being "too long". Even on a network
+ c-ares query timeout. That is enough time for a typical parallel
+ response to arrive without being "too long". Even on a network
where one of the two types of queries is failing or timing out
constantly, this will usually mean we wait a total of the default
c-ares timeout (5 seconds) plus the round trip time for the successful
- request, which seems bearable. The downside is that c-ares might race
+ request, which seems bearable. The downside is that c-ares might race
with us to issue one more retry just before we give up, but it seems
better to "waste" that request instead of trying to guess the perfect
- timeout to prevent it. After all, we don't even know where in the
+ timeout to prevent it. After all, we do not even know where in the
c-ares retry cycle each request is.
*/
res->happy_eyeballs_dns_time = Curl_now();
@@ -849,8 +849,8 @@ CURLcode Curl_set_dns_servers(struct Curl_easy *data,
/* If server is NULL or empty, this would purge all DNS servers
* from ares library, which will cause any and all queries to fail.
- * So, just return OK if none are configured and don't actually make
- * any changes to c-ares. This lets c-ares use its defaults, which
+ * So, just return OK if none are configured and do not actually make
+ * any changes to c-ares. This lets c-ares use its defaults, which
* it gets from the OS (for instance from /etc/resolv.conf on Linux).
*/
if(!(servers && servers[0]))
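
Curl_set_dns_servers() above backs CURLOPT_DNS_SERVERS, which is only honored when libcurl was built with the c-ares resolver; other builds report an error from curl_easy_setopt(). A sketch using documentation-range addresses:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    CURLcode rc = curl_easy_setopt(curl, CURLOPT_DNS_SERVERS,
                                   "192.0.2.1,192.0.2.2");
    if(rc)
      fprintf(stderr, "cannot override DNS servers: %s\n",
              curl_easy_strerror(rc));
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
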
diff --git a/libs/libcurl/src/asyn-thread.c b/libs/libcurl/src/asyn-thread.c
index 779f285283..d6687fe8f8 100644
--- a/libs/libcurl/src/asyn-thread.c
+++ b/libs/libcurl/src/asyn-thread.c
@@ -168,7 +168,7 @@ struct thread_sync_data {
duplicate */
#ifndef CURL_DISABLE_SOCKETPAIR
struct Curl_easy *data;
- curl_socket_t sock_pair[2]; /* socket pair */
+ curl_socket_t sock_pair[2]; /* eventfd/pipes/socket pair */
#endif
int sock_error;
struct Curl_addrinfo *res;
@@ -251,7 +251,7 @@ int init_thread_sync_data(struct thread_data *td,
#ifndef CURL_DISABLE_SOCKETPAIR
/* create socket pair or pipe */
- if(wakeup_create(&tsd->sock_pair[0]) < 0) {
+ if(wakeup_create(tsd->sock_pair, FALSE) < 0) {
tsd->sock_pair[0] = CURL_SOCKET_BAD;
tsd->sock_pair[1] = CURL_SOCKET_BAD;
goto err_exit;
@@ -286,7 +286,7 @@ static CURLcode getaddrinfo_complete(struct Curl_easy *data)
result = Curl_addrinfo_callback(data, tsd->sock_error, tsd->res);
/* The tsd->res structure has been copied to async.dns and perhaps the DNS
- cache. Set our copy to NULL so destroy_thread_sync_data doesn't free it.
+ cache. Set our copy to NULL so destroy_thread_sync_data does not free it.
*/
tsd->res = NULL;
@@ -302,6 +302,14 @@ query_complete(DWORD err, DWORD bytes, LPWSAOVERLAPPED overlapped)
struct Curl_addrinfo *ca;
struct Curl_addrinfo *cafirst = NULL;
struct Curl_addrinfo *calast = NULL;
+#ifndef CURL_DISABLE_SOCKETPAIR
+#ifdef USE_EVENTFD
+ const void *buf;
+ const uint64_t val = 1;
+#else
+ char buf[1];
+#endif
+#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-align"
@@ -421,11 +429,14 @@ query_complete(DWORD err, DWORD bytes, LPWSAOVERLAPPED overlapped)
}
else {
#ifndef CURL_DISABLE_SOCKETPAIR
- char buf[1];
if(tsd->sock_pair[1] != CURL_SOCKET_BAD) {
- /* DNS has been resolved, signal client task */
+#ifdef USE_EVENTFD
+ buf = &val;
+#else
buf[0] = 1;
- if(swrite(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
+#endif
+ /* DNS has been resolved, signal client task */
+ if(wakeup_write(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
/* update sock_erro to errno */
tsd->sock_error = SOCKERRNO;
}
@@ -447,15 +458,26 @@ query_complete(DWORD err, DWORD bytes, LPWSAOVERLAPPED overlapped)
* For builds without ARES, but with USE_IPV6, create a resolver thread
* and wait on it.
*/
-static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
+static
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
+DWORD
+#else
+unsigned int
+#endif
+CURL_STDCALL getaddrinfo_thread(void *arg)
{
struct thread_sync_data *tsd = (struct thread_sync_data *)arg;
struct thread_data *td = tsd->td;
char service[12];
int rc;
#ifndef CURL_DISABLE_SOCKETPAIR
+#ifdef USE_EVENTFD
+ const void *buf;
+ const uint64_t val = 1;
+#else
char buf[1];
#endif
+#endif
msnprintf(service, sizeof(service), "%d", tsd->port);
@@ -480,9 +502,13 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
else {
#ifndef CURL_DISABLE_SOCKETPAIR
if(tsd->sock_pair[1] != CURL_SOCKET_BAD) {
- /* DNS has been resolved, signal client task */
+#ifdef USE_EVENTFD
+ buf = &val;
+#else
buf[0] = 1;
- if(wakeup_write(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
+#endif
+ /* DNS has been resolved, signal client task */
+ if(wakeup_write(tsd->sock_pair[1], buf, sizeof(buf)) < 0) {
/* update sock_erro to errno */
tsd->sock_error = SOCKERRNO;
}
@@ -500,7 +526,13 @@ static unsigned int CURL_STDCALL getaddrinfo_thread(void *arg)
/*
* gethostbyname_thread() resolves a name and then exits.
*/
-static unsigned int CURL_STDCALL gethostbyname_thread(void *arg)
+static
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
+DWORD
+#else
+unsigned int
+#endif
+CURL_STDCALL gethostbyname_thread(void *arg)
{
struct thread_sync_data *tsd = (struct thread_sync_data *)arg;
struct thread_data *td = tsd->td;
@@ -638,7 +670,8 @@ static bool init_resolve_thread(struct Curl_easy *data,
#ifdef _WIN32
if(Curl_isWindows8OrGreater && Curl_FreeAddrInfoExW &&
- Curl_GetAddrInfoExCancel && Curl_GetAddrInfoExW) {
+ Curl_GetAddrInfoExCancel && Curl_GetAddrInfoExW &&
+ !Curl_win32_impersonating()) {
#define MAX_NAME_LEN 256 /* max domain name is 253 chars */
#define MAX_PORT_LEN 8
WCHAR namebuf[MAX_NAME_LEN];
@@ -664,7 +697,7 @@ static bool init_resolve_thread(struct Curl_easy *data,
NULL, &td->tsd.w8.overlapped,
&query_complete, &td->tsd.w8.cancel_ev);
if(err != WSA_IO_PENDING)
- query_complete(err, 0, &td->tsd.w8.overlapped);
+ query_complete((DWORD)err, 0, &td->tsd.w8.overlapped);
return TRUE;
}
}
@@ -757,8 +790,8 @@ void Curl_resolver_kill(struct Curl_easy *data)
{
struct thread_data *td = data->state.async.tdata;
- /* If we're still resolving, we must wait for the threads to fully clean up,
- unfortunately. Otherwise, we can simply cancel to clean up any resolver
+ /* If we are still resolving, we must wait for the threads to fully clean up,
+ unfortunately. Otherwise, we can simply cancel to clean up any resolver
data. */
#ifdef _WIN32
if(td && td->complete_ev) {
@@ -829,7 +862,7 @@ CURLcode Curl_resolver_is_resolved(struct Curl_easy *data,
}
else {
/* poll for name lookup done with exponential backoff up to 250ms */
- /* should be fine even if this converts to 32 bit */
+ /* should be fine even if this converts to 32-bit */
timediff_t elapsed = Curl_timediff(Curl_now(),
data->progress.t_startsingle);
if(elapsed < 0)
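
The changes above route the resolver thread's completion signal through wakeup_write() on an eventfd/pipe/socket pair instead of a raw swrite(). An illustrative POSIX-only sketch of that general pattern, not curl's internal macros; all names here are my own.

#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int wakeup_pair[2]; /* [0] is the read end polled by the main thread */

static void *worker(void *arg)
{
  char byte = 1;
  (void)arg;
  /* ... blocking name resolution would happen here ... */
  if(write(wakeup_pair[1], &byte, 1) < 0)
    perror("write");
  return NULL;
}

int main(void)
{
  pthread_t tid;
  struct pollfd pfd;
  char byte;

  if(pipe(wakeup_pair))
    return 1;
  pthread_create(&tid, NULL, worker, NULL);

  pfd.fd = wakeup_pair[0];
  pfd.events = POLLIN;
  if(poll(&pfd, 1, 5000) == 1 && read(wakeup_pair[0], &byte, 1) == 1)
    printf("worker signalled completion\n");

  pthread_join(tid, NULL);
  close(wakeup_pair[0]);
  close(wakeup_pair[1]);
  return 0;
}
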
diff --git a/libs/libcurl/src/asyn.h b/libs/libcurl/src/asyn.h
index f3f3fc5f6f..fd6792edb6 100644
--- a/libs/libcurl/src/asyn.h
+++ b/libs/libcurl/src/asyn.h
@@ -58,7 +58,7 @@ void Curl_resolver_global_cleanup(void);
* Curl_resolver_init()
* Called from curl_easy_init() -> Curl_open() to initialize resolver
* URL-state specific environment ('resolver' member of the UrlState
- * structure). Should fill the passed pointer by the initialized handler.
+ * structure). Should fill the passed pointer by the initialized handler.
* Returning anything else than CURLE_OK fails curl_easy_init() with the
* correspondent code.
*/
@@ -68,7 +68,7 @@ CURLcode Curl_resolver_init(struct Curl_easy *easy, void **resolver);
* Curl_resolver_cleanup()
* Called from curl_easy_cleanup() -> Curl_close() to cleanup resolver
* URL-state specific environment ('resolver' member of the UrlState
- * structure). Should destroy the handler and free all resources connected to
+ * structure). Should destroy the handler and free all resources connected to
* it.
*/
void Curl_resolver_cleanup(void *resolver);
@@ -76,9 +76,9 @@ void Curl_resolver_cleanup(void *resolver);
/*
* Curl_resolver_duphandle()
* Called from curl_easy_duphandle() to duplicate resolver URL-state specific
- * environment ('resolver' member of the UrlState structure). Should
+ * environment ('resolver' member of the UrlState structure). Should
* duplicate the 'from' handle and pass the resulting handle to the 'to'
- * pointer. Returning anything else than CURLE_OK causes failed
+ * pointer. Returning anything else than CURLE_OK causes failed
* curl_easy_duphandle() call.
*/
CURLcode Curl_resolver_duphandle(struct Curl_easy *easy, void **to,
@@ -89,7 +89,7 @@ CURLcode Curl_resolver_duphandle(struct Curl_easy *easy, void **to,
*
* It is called from inside other functions to cancel currently performing
* resolver request. Should also free any temporary resources allocated to
- * perform a request. This never waits for resolver threads to complete.
+ * perform a request. This never waits for resolver threads to complete.
*
* It is safe to call this when conn is in any state.
*/
@@ -99,8 +99,8 @@ void Curl_resolver_cancel(struct Curl_easy *data);
* Curl_resolver_kill().
*
* This acts like Curl_resolver_cancel() except it will block until any threads
- * associated with the resolver are complete. This never blocks for resolvers
- * that do not use threads. This is intended to be the "last chance" function
+ * associated with the resolver are complete. This never blocks for resolvers
+ * that do not use threads. This is intended to be the "last chance" function
* that cleans up an in-progress resolver completely (before its owner is about
* to die).
*
@@ -161,7 +161,7 @@ struct Curl_addrinfo *Curl_resolver_getaddrinfo(struct Curl_easy *data,
int *waitp);
#ifndef CURLRES_ASYNCH
-/* convert these functions if an asynch resolver isn't used */
+/* convert these functions if an asynch resolver is not used */
#define Curl_resolver_cancel(x) Curl_nop_stmt
#define Curl_resolver_kill(x) Curl_nop_stmt
#define Curl_resolver_is_resolved(x,y) CURLE_COULDNT_RESOLVE_HOST
diff --git a/libs/libcurl/src/bufref.c b/libs/libcurl/src/bufref.c
index 644aebf188..e94dc910b2 100644
--- a/libs/libcurl/src/bufref.c
+++ b/libs/libcurl/src/bufref.c
@@ -48,7 +48,7 @@ void Curl_bufref_init(struct bufref *br)
}
/*
- * Free the buffer and re-init the necessary fields. It doesn't touch the
+ * Free the buffer and re-init the necessary fields. It does not touch the
* 'signature' field and thus this buffer reference can be reused.
*/
diff --git a/libs/libcurl/src/c-hyper.c b/libs/libcurl/src/c-hyper.c
index d2523718f4..9780019f66 100644
--- a/libs/libcurl/src/c-hyper.c
+++ b/libs/libcurl/src/c-hyper.c
@@ -206,7 +206,7 @@ static int hyper_body_chunk(void *userdata, const hyper_buf *chunk)
struct SingleRequest *k = &data->req;
CURLcode result = CURLE_OK;
- if(0 == k->bodywrites) {
+ if(!k->bodywritten) {
#if defined(USE_NTLM)
struct connectdata *conn = data->conn;
if(conn->bits.close &&
@@ -324,7 +324,7 @@ static CURLcode empty_header(struct Curl_easy *data)
result = hyper_each_header(data, NULL, 0, NULL, 0) ?
CURLE_WRITE_ERROR : CURLE_OK;
if(result)
- failf(data, "hyperstream: couldn't pass blank header");
+ failf(data, "hyperstream: could not pass blank header");
/* Hyper does chunked decoding itself. If it was added during
* response header processing, remove it again. */
Curl_cwriter_remove_by_name(data, "chunked");
@@ -420,8 +420,8 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
/* end of transfer */
data->req.done = TRUE;
infof(data, "hyperstream is done");
- if(!k->bodywrites) {
- /* hyper doesn't always call the body write callback */
+ if(!k->bodywritten) {
+ /* hyper does not always call the body write callback */
result = Curl_http_firstwrite(data);
}
break;
@@ -439,7 +439,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
*didwhat = KEEP_RECV;
if(!resp) {
- failf(data, "hyperstream: couldn't get response");
+ failf(data, "hyperstream: could not get response");
return CURLE_RECV_ERROR;
}
@@ -462,7 +462,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
headers = hyper_response_headers(resp);
if(!headers) {
- failf(data, "hyperstream: couldn't get response headers");
+ failf(data, "hyperstream: could not get response headers");
result = CURLE_RECV_ERROR;
break;
}
@@ -505,7 +505,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
resp_body = hyper_response_body(resp);
if(!resp_body) {
- failf(data, "hyperstream: couldn't get response body");
+ failf(data, "hyperstream: could not get response body");
result = CURLE_RECV_ERROR;
break;
}
@@ -669,7 +669,7 @@ static int uploadstreamed(void *userdata, hyper_context *ctx,
goto out;
}
/* increasing the writebytecount here is a little premature but we
- don't know exactly when the body is sent */
+ do not know exactly when the body is sent */
data->req.writebytecount += fillcount;
Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
rc = HYPER_POLL_READY;
@@ -772,7 +772,7 @@ static void http1xx_cb(void *arg, struct hyper_response *resp)
if(!result) {
headers = hyper_response_headers(resp);
if(!headers) {
- failf(data, "hyperstream: couldn't get 1xx response headers");
+ failf(data, "hyperstream: could not get 1xx response headers");
result = CURLE_RECV_ERROR;
}
}
@@ -1133,7 +1133,7 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
Curl_pgrsSetUploadSize(data, 0); /* nothing */
}
- Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SENDRECV, -1, TRUE);
conn->datastream = Curl_hyper_stream;
/* clear userpwd and proxyuserpwd to avoid reusing old credentials
@@ -1206,6 +1206,7 @@ static const struct Curl_crtype cr_hyper_protocol = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
cr_hyper_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct Curl_creader)
};
diff --git a/libs/libcurl/src/cf-h1-proxy.c b/libs/libcurl/src/cf-h1-proxy.c
index 085a343475..5340495f5e 100644
--- a/libs/libcurl/src/cf-h1-proxy.c
+++ b/libs/libcurl/src/cf-h1-proxy.c
@@ -65,7 +65,6 @@ typedef enum {
/* struct for HTTP CONNECT tunneling */
struct h1_tunnel_state {
- struct HTTP CONNECT;
struct dynbuf rcvbuf;
struct dynbuf request_data;
size_t nsent;
@@ -182,8 +181,8 @@ static void h1_tunnel_go_state(struct Curl_cfilter *cf,
data->info.httpcode = 0; /* clear it as it might've been used for the
proxy */
/* If a proxy-authorization header was used for the proxy, then we should
- make sure that it isn't accidentally used for the document request
- after we've connected. So let's free and clear it here. */
+ make sure that it is not accidentally used for the document request
+ after we have connected. So let's free and clear it here. */
Curl_safefree(data->state.aptr.proxyuserpwd);
#ifdef USE_HYPER
data->state.hconnect = FALSE;
@@ -222,8 +221,8 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf,
int http_minor;
CURLcode result;
- /* This only happens if we've looped here due to authentication
- reasons, and we don't really use the newly cloned URL here
+ /* This only happens if we have looped here due to authentication
+ reasons, and we do not really use the newly cloned URL here
then. Just free() it. */
Curl_safefree(data->req.newurl);
@@ -422,7 +421,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(ts->cl) {
/* A Content-Length based body: simply count down the counter
- and make sure to break out of the loop when we're done! */
+ and make sure to break out of the loop when we are done! */
ts->cl--;
if(ts->cl <= 0) {
ts->keepon = KEEPON_DONE;
@@ -440,7 +439,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(result)
return result;
if(Curl_httpchunk_is_done(data, &ts->ch)) {
- /* we're done reading chunks! */
+ /* we are done reading chunks! */
infof(data, "chunk reading DONE");
ts->keepon = KEEPON_DONE;
}
@@ -475,7 +474,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
if(result)
return result;
- /* Newlines are CRLF, so the CR is ignored as the line isn't
+ /* Newlines are CRLF, so the CR is ignored as the line is not
really terminated until the LF comes. Treat a following CR
as end-of-headers as well.*/
@@ -498,7 +497,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
}
else {
/* without content-length or chunked encoding, we
- can't keep the connection alive since the close is
+ cannot keep the connection alive since the close is
the end signal so we bail out at once instead */
CURL_TRC_CF(data, cf, "CONNECT: no content-length or chunked");
ts->keepon = KEEPON_DONE;
@@ -518,7 +517,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
return result;
Curl_dyn_reset(&ts->rcvbuf);
- } /* while there's buffer left and loop is requested */
+ } /* while there is buffer left and loop is requested */
if(error)
result = CURLE_RECV_ERROR;
@@ -666,8 +665,8 @@ static CURLcode start_CONNECT(struct Curl_cfilter *cf,
goto error;
}
- /* This only happens if we've looped here due to authentication
- reasons, and we don't really use the newly cloned URL here
+ /* This only happens if we have looped here due to authentication
+ reasons, and we do not really use the newly cloned URL here
then. Just free() it. */
Curl_safefree(data->req.newurl);
@@ -955,7 +954,7 @@ static CURLcode H1_CONNECT(struct Curl_cfilter *cf,
DEBUGASSERT(ts->tunnel_state == H1_TUNNEL_RESPONSE);
if(data->info.httpproxycode/100 != 2) {
- /* a non-2xx response and we have no next url to try. */
+ /* a non-2xx response and we have no next URL to try. */
Curl_safefree(data->req.newurl);
/* failure, close this connection to avoid reuse */
streamclose(conn, "proxy CONNECT failure");
@@ -1034,9 +1033,9 @@ static void cf_h1_proxy_adjust_pollset(struct Curl_cfilter *cf,
* and not waiting on something, we are tunneling. */
curl_socket_t sock = Curl_conn_cf_get_socket(cf, data);
if(ts) {
- /* when we've sent a CONNECT to a proxy, we should rather either
+ /* when we have sent a CONNECT to a proxy, we should rather either
wait for the socket to become readable to be able to get the
- response headers or if we're still sending the request, wait
+ response headers or if we are still sending the request, wait
for write. */
if(tunnel_want_send(ts))
Curl_pollset_set_out_only(data, ps, sock);
@@ -1077,6 +1076,7 @@ struct Curl_cftype Curl_cft_h1_proxy = {
cf_h1_proxy_destroy,
cf_h1_proxy_connect,
cf_h1_proxy_close,
+ Curl_cf_def_shutdown,
Curl_cf_http_proxy_get_host,
cf_h1_proxy_adjust_pollset,
Curl_cf_def_data_pending,
diff --git a/libs/libcurl/src/cf-h2-proxy.c b/libs/libcurl/src/cf-h2-proxy.c
index be9f932ecf..9d62352a8b 100644
--- a/libs/libcurl/src/cf-h2-proxy.c
+++ b/libs/libcurl/src/cf-h2-proxy.c
@@ -162,8 +162,8 @@ static void h2_tunnel_go_state(struct Curl_cfilter *cf,
CURL_TRC_CF(data, cf, "[%d] new tunnel state 'failed'", ts->stream_id);
ts->state = new_state;
/* If a proxy-authorization header was used for the proxy, then we should
- make sure that it isn't accidentally used for the document request
- after we've connected. So let's free and clear it here. */
+ make sure that it is not accidentally used for the document request
+ after we have connected. So let's free and clear it here. */
Curl_safefree(data->state.aptr.proxyuserpwd);
break;
}
@@ -181,7 +181,8 @@ struct cf_h2_proxy_ctx {
int32_t goaway_error;
int32_t last_stream_id;
BIT(conn_closed);
- BIT(goaway);
+ BIT(rcvd_goaway);
+ BIT(sent_goaway);
BIT(nw_out_blocked);
};
@@ -694,7 +695,7 @@ static int proxy_h2_on_frame_recv(nghttp2_session *session,
}
break;
case NGHTTP2_GOAWAY:
- ctx->goaway = TRUE;
+ ctx->rcvd_goaway = TRUE;
break;
default:
break;
@@ -1166,6 +1167,49 @@ static void cf_h2_proxy_destroy(struct Curl_cfilter *cf,
}
}
+static CURLcode cf_h2_proxy_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
+ CURLcode result;
+ int rv;
+
+ if(!cf->connected || !ctx->h2 || cf->shutdown || ctx->conn_closed) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ CF_DATA_SAVE(save, cf, data);
+
+ if(!ctx->sent_goaway) {
+ rv = nghttp2_submit_goaway(ctx->h2, NGHTTP2_FLAG_NONE,
+ 0, 0,
+ (const uint8_t *)"shutown", sizeof("shutown"));
+ if(rv) {
+ failf(data, "nghttp2_submit_goaway() failed: %s(%d)",
+ nghttp2_strerror(rv), rv);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ ctx->sent_goaway = TRUE;
+ }
+ /* GOAWAY submitted, process egress and ingress until nghttp2 is done. */
+ result = CURLE_OK;
+ if(nghttp2_session_want_write(ctx->h2))
+ result = proxy_h2_progress_egress(cf, data);
+ if(!result && nghttp2_session_want_read(ctx->h2))
+ result = proxy_h2_progress_ingress(cf, data);
+
+ *done = (ctx->conn_closed ||
+ (!result && !nghttp2_session_want_write(ctx->h2) &&
+ !nghttp2_session_want_read(ctx->h2)));
+out:
+ CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
+ return result;
+}
+
static bool cf_h2_proxy_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
@@ -1182,12 +1226,18 @@ static void cf_h2_proxy_adjust_pollset(struct Curl_cfilter *cf,
struct easy_pollset *ps)
{
struct cf_h2_proxy_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
curl_socket_t sock = Curl_conn_cf_get_socket(cf, data);
bool want_recv, want_send;
- Curl_pollset_check(data, ps, sock, &want_recv, &want_send);
+ if(!cf->connected && ctx->h2) {
+ want_send = nghttp2_session_want_write(ctx->h2);
+ want_recv = nghttp2_session_want_read(ctx->h2);
+ }
+ else
+ Curl_pollset_check(data, ps, sock, &want_recv, &want_send);
+
if(ctx->h2 && (want_recv || want_send)) {
- struct cf_call_data save;
bool c_exhaust, s_exhaust;
CF_DATA_SAVE(save, cf, data);
@@ -1202,6 +1252,14 @@ static void cf_h2_proxy_adjust_pollset(struct Curl_cfilter *cf,
Curl_pollset_set(data, ps, sock, want_recv, want_send);
CF_DATA_RESTORE(cf, save);
}
+ else if(ctx->sent_goaway && !cf->shutdown) {
+ /* shutdown in progress */
+ CF_DATA_SAVE(save, cf, data);
+ want_send = nghttp2_session_want_write(ctx->h2);
+ want_recv = nghttp2_session_want_read(ctx->h2);
+ Curl_pollset_set(data, ps, sock, want_recv, want_send);
+ CF_DATA_RESTORE(cf, save);
+ }
}
static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
@@ -1214,7 +1272,7 @@ static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf,
if(ctx->tunnel.error == NGHTTP2_REFUSED_STREAM) {
CURL_TRC_CF(data, cf, "[%d] REFUSED_STREAM, try again on a new "
"connection", ctx->tunnel.stream_id);
- connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
+ connclose(cf->conn, "REFUSED_STREAM"); /* do not use this anymore */
*err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
return -1;
}
@@ -1259,7 +1317,8 @@ static ssize_t tunnel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
}
else if(ctx->tunnel.reset ||
(ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
- (ctx->goaway && ctx->last_stream_id < ctx->tunnel.stream_id)) {
+ (ctx->rcvd_goaway &&
+ ctx->last_stream_id < ctx->tunnel.stream_id)) {
*err = CURLE_RECV_ERROR;
nread = -1;
}
@@ -1306,7 +1365,7 @@ static ssize_t cf_h2_proxy_recv(struct Curl_cfilter *cf,
result = proxy_h2_progress_egress(cf, data);
if(result == CURLE_AGAIN) {
- /* pending data to send, need to be called again. Ideally, we'd
+ /* pending data to send, need to be called again. Ideally, we would
* monitor the socket for POLLOUT, but we might not be in SENDING
* transfer state any longer and are unable to make this happen.
*/
@@ -1418,7 +1477,7 @@ static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf,
/* Unable to send all data, due to connection blocked or H2 window
* exhaustion. Data is left in our stream buffer, or nghttp2's internal
* frame buffer or our network out buffer. */
- size_t rwin = nghttp2_session_get_stream_remote_window_size(
+ size_t rwin = (size_t)nghttp2_session_get_stream_remote_window_size(
ctx->h2, ctx->tunnel.stream_id);
if(rwin == 0) {
/* H2 flow window exhaustion.
@@ -1489,8 +1548,8 @@ static bool proxy_h2_connisalive(struct Curl_cfilter *cf,
return FALSE;
if(*input_pending) {
- /* This happens before we've sent off a request and the connection is
- not in use by any other transfer, there shouldn't be any data here,
+ /* This happens before we have sent off a request and the connection is
+ not in use by any other transfer, there should not be any data here,
only "protocol frames" */
CURLcode result;
ssize_t nread = -1;
@@ -1537,6 +1596,7 @@ struct Curl_cftype Curl_cft_h2_proxy = {
cf_h2_proxy_destroy,
cf_h2_proxy_connect,
cf_h2_proxy_close,
+ cf_h2_proxy_shutdown,
Curl_cf_http_proxy_get_host,
cf_h2_proxy_adjust_pollset,
cf_h2_proxy_data_pending,
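The new cf_h2_proxy_shutdown() above follows the usual nghttp2 pattern for a graceful close: submit a GOAWAY, then keep driving the session until it wants neither to read nor to write. A minimal standalone sketch of that pattern, assuming only libnghttp2 and no real transport (frames are serialized to memory instead of a socket):

```c
#include <nghttp2/nghttp2.h>
#include <stdio.h>

int main(void)
{
  nghttp2_session_callbacks *cbs;
  nghttp2_session *h2;
  const uint8_t *frame;
  ssize_t n;

  if(nghttp2_session_callbacks_new(&cbs))
    return 1;
  if(nghttp2_session_client_new(&h2, cbs, NULL)) {
    nghttp2_session_callbacks_del(cbs);
    return 1;
  }
  nghttp2_session_callbacks_del(cbs);

  /* announce the shutdown: last_stream_id 0, no error, small debug blob */
  if(nghttp2_submit_goaway(h2, NGHTTP2_FLAG_NONE, 0, NGHTTP2_NO_ERROR,
                           (const uint8_t *)"bye", 3))
    return 1;

  /* drive egress until nghttp2 has nothing left to send (client preface,
     SETTINGS, then the GOAWAY); a connection filter would hand each chunk
     to the socket layer and also keep feeding ingress while want_read */
  while(nghttp2_session_want_write(h2)) {
    n = nghttp2_session_mem_send(h2, &frame);
    if(n <= 0)
      break;
    printf("would send %zd bytes to the peer\n", n);
  }

  nghttp2_session_del(h2);
  return 0;
}
```

In the filter itself, *done additionally becomes TRUE when the connection is already closed, and a failed GOAWAY submission is surfaced as CURLE_SEND_ERROR.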
diff --git a/libs/libcurl/src/cf-haproxy.c b/libs/libcurl/src/cf-haproxy.c
index c9a0102eab..d7c684b0fe 100644
--- a/libs/libcurl/src/cf-haproxy.c
+++ b/libs/libcurl/src/cf-haproxy.c
@@ -194,6 +194,7 @@ struct Curl_cftype Curl_cft_haproxy = {
cf_haproxy_destroy,
cf_haproxy_connect,
cf_haproxy_close,
+ Curl_cf_def_shutdown,
Curl_cf_def_get_host,
cf_haproxy_adjust_pollset,
Curl_cf_def_data_pending,
diff --git a/libs/libcurl/src/cf-https-connect.c b/libs/libcurl/src/cf-https-connect.c
index 681159f178..ae796d3a72 100644
--- a/libs/libcurl/src/cf-https-connect.c
+++ b/libs/libcurl/src/cf-https-connect.c
@@ -55,7 +55,8 @@ struct cf_hc_baller {
CURLcode result;
struct curltime started;
int reply_ms;
- bool enabled;
+ BIT(enabled);
+ BIT(shutdown);
};
static void cf_hc_baller_reset(struct cf_hc_baller *b,
@@ -322,6 +323,49 @@ out:
return result;
}
+static CURLcode cf_hc_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_hc_ctx *ctx = cf->ctx;
+ struct cf_hc_baller *ballers[2];
+ size_t i;
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(data);
+ if(cf->connected) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ /* shutdown all ballers that have not done so already. If one fails,
+ * continue shutting down others until all are shutdown. */
+ ballers[0] = &ctx->h3_baller;
+ ballers[1] = &ctx->h21_baller;
+ for(i = 0; i < sizeof(ballers)/sizeof(ballers[0]); i++) {
+ struct cf_hc_baller *b = ballers[i];
+ bool bdone = FALSE;
+ if(!cf_hc_baller_is_active(b) || b->shutdown)
+ continue;
+ b->result = b->cf->cft->do_shutdown(b->cf, data, &bdone);
+ if(b->result || bdone)
+ b->shutdown = TRUE; /* treat a failed shutdown as done */
+ }
+
+ *done = TRUE;
+ for(i = 0; i < sizeof(ballers)/sizeof(ballers[0]); i++) {
+ if(ballers[i] && !ballers[i]->shutdown)
+ *done = FALSE;
+ }
+ if(*done) {
+ for(i = 0; i < sizeof(ballers)/sizeof(ballers[0]); i++) {
+ if(ballers[i] && ballers[i]->result)
+ result = ballers[i]->result;
+ }
+ }
+ CURL_TRC_CF(data, cf, "shutdown -> %d, done=%d", result, *done);
+ return result;
+}
+
static void cf_hc_adjust_pollset(struct Curl_cfilter *cf,
struct Curl_easy *data,
struct easy_pollset *ps)
@@ -434,6 +478,7 @@ struct Curl_cftype Curl_cft_http_connect = {
cf_hc_destroy,
cf_hc_connect,
cf_hc_close,
+ cf_hc_shutdown,
Curl_cf_def_get_host,
cf_hc_adjust_pollset,
cf_hc_data_pending,
@@ -510,7 +555,7 @@ CURLcode Curl_cf_https_setup(struct Curl_easy *data,
if(data->state.httpwant == CURL_HTTP_VERSION_3ONLY) {
result = Curl_conn_may_http3(data, conn);
- if(result) /* can't do it */
+ if(result) /* cannot do it */
goto out;
try_h3 = TRUE;
try_h21 = FALSE;
diff --git a/libs/libcurl/src/cf-socket.c b/libs/libcurl/src/cf-socket.c
index 713499a2e8..bd3acb7e8b 100644
--- a/libs/libcurl/src/cf-socket.c
+++ b/libs/libcurl/src/cf-socket.c
@@ -35,6 +35,9 @@
#elif defined(HAVE_NETINET_TCP_H)
#include <netinet/tcp.h>
#endif
+#ifdef HAVE_NETINET_UDP_H
+#include <netinet/udp.h>
+#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
@@ -53,6 +56,11 @@
#include <inet.h>
#endif
+#ifdef __DragonFly__
+/* Required for __DragonFly_version */
+#include <sys/param.h>
+#endif
+
#include "urldata.h"
#include "bufq.h"
#include "sendf.h"
@@ -73,6 +81,7 @@
#include "multihandle.h"
#include "rand.h"
#include "share.h"
+#include "strdup.h"
#include "version_win32.h"
/* The last 3 #include files should be in this order */
@@ -137,8 +146,12 @@ static void nosigpipe(struct Curl_easy *data,
#define nosigpipe(x,y) Curl_nop_stmt
#endif
-#if defined(__DragonFly__) || defined(USE_WINSOCK)
-/* DragonFlyBSD and Windows use millisecond units */
+#if defined(USE_WINSOCK) || \
+ (defined(__sun) && !defined(TCP_KEEPIDLE)) || \
+ (defined(__DragonFly__) && __DragonFly_version < 500702) || \
+ (defined(_WIN32) && !defined(TCP_KEEPIDLE))
+/* Solaris < 11.4, DragonFlyBSD < 500702 and Windows < 10.0.16299
+ * use millisecond units. */
#define KEEPALIVE_FACTOR(x) (x *= 1000)
#else
#define KEEPALIVE_FACTOR(x)
@@ -168,23 +181,50 @@ tcpkeepalive(struct Curl_easy *data,
sockfd, SOCKERRNO);
}
else {
-#if defined(SIO_KEEPALIVE_VALS)
+#if defined(SIO_KEEPALIVE_VALS) /* Windows */
+/* Windows 10, version 1709 (10.0.16299) and later versions */
+#if defined(TCP_KEEPIDLE) && defined(TCP_KEEPINTVL) && defined(TCP_KEEPCNT)
+ optval = curlx_sltosi(data->set.tcp_keepidle);
+ KEEPALIVE_FACTOR(optval);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPIDLE,
+ (const char *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPIDLE on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
+ optval = curlx_sltosi(data->set.tcp_keepintvl);
+ KEEPALIVE_FACTOR(optval);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPINTVL,
+ (const char *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPINTVL on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
+ optval = curlx_sltosi(data->set.tcp_keepcnt);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPCNT,
+ (const char *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPCNT on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
+#else /* Windows < 10.0.16299 */
struct tcp_keepalive vals;
DWORD dummy;
vals.onoff = 1;
optval = curlx_sltosi(data->set.tcp_keepidle);
KEEPALIVE_FACTOR(optval);
- vals.keepalivetime = optval;
+ vals.keepalivetime = (u_long)optval;
optval = curlx_sltosi(data->set.tcp_keepintvl);
KEEPALIVE_FACTOR(optval);
- vals.keepaliveinterval = optval;
+ vals.keepaliveinterval = (u_long)optval;
if(WSAIoctl(sockfd, SIO_KEEPALIVE_VALS, (LPVOID) &vals, sizeof(vals),
NULL, 0, &dummy, NULL, NULL) != 0) {
infof(data, "Failed to set SIO_KEEPALIVE_VALS on fd "
"%" CURL_FORMAT_SOCKET_T ": errno %d",
sockfd, SOCKERRNO);
}
-#else
+#endif
+#else /* !Windows */
#ifdef TCP_KEEPIDLE
optval = curlx_sltosi(data->set.tcp_keepidle);
KEEPALIVE_FACTOR(optval);
@@ -204,6 +244,16 @@ tcpkeepalive(struct Curl_easy *data,
"%" CURL_FORMAT_SOCKET_T ": errno %d",
sockfd, SOCKERRNO);
}
+#elif defined(TCP_KEEPALIVE_THRESHOLD)
+ /* Solaris <11.4 style */
+ optval = curlx_sltosi(data->set.tcp_keepidle);
+ KEEPALIVE_FACTOR(optval);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD,
+ (void *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPALIVE_THRESHOLD on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
#endif
#ifdef TCP_KEEPINTVL
optval = curlx_sltosi(data->set.tcp_keepintvl);
@@ -214,6 +264,36 @@ tcpkeepalive(struct Curl_easy *data,
"%" CURL_FORMAT_SOCKET_T ": errno %d",
sockfd, SOCKERRNO);
}
+#elif defined(TCP_KEEPALIVE_ABORT_THRESHOLD)
+ /* Solaris <11.4 style */
+ /* TCP_KEEPALIVE_ABORT_THRESHOLD should be equal to
+ * TCP_KEEPCNT * TCP_KEEPINTVL on other platforms.
+ * The default value of TCP_KEEPCNT is 9 on Linux,
+ * 8 on *BSD/macOS, 5 or 10 on Windows. We use the
+ * default config for Solaris <11.4 because there is
+ * no default value for TCP_KEEPCNT on Solaris 11.4.
+ *
+ * Note that the subsequent probes will not be sent
+ * at equal intervals on Solaris, but will be sent
+ * using the exponential backoff algorithm. */
+ optval = curlx_sltosi(data->set.tcp_keepcnt) *
+ curlx_sltosi(data->set.tcp_keepintvl);
+ KEEPALIVE_FACTOR(optval);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD,
+ (void *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPALIVE_ABORT_THRESHOLD on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
+#endif
+#ifdef TCP_KEEPCNT
+ optval = curlx_sltosi(data->set.tcp_keepcnt);
+ if(setsockopt(sockfd, IPPROTO_TCP, TCP_KEEPCNT,
+ (void *)&optval, sizeof(optval)) < 0) {
+ infof(data, "Failed to set TCP_KEEPCNT on fd "
+ "%" CURL_FORMAT_SOCKET_T ": errno %d",
+ sockfd, SOCKERRNO);
+ }
#endif
#endif
}
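For reference, the keepalive knobs the hunks above wire up map onto plain setsockopt() calls. A standalone POSIX sketch with arbitrary example values (60 s idle, 60 s probe interval, 9 probes) and the same feature guards; as noted above, Windows before 10.0.16299 and Solaris before 11.4 expect milliseconds or use the *_THRESHOLD options instead:

```c
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <unistd.h>

static void set_keepalive(int fd, int idle_s, int intvl_s, int cnt)
{
  int on = 1;
  if(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
    perror("SO_KEEPALIVE");
#ifdef TCP_KEEPIDLE
  if(setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, sizeof(idle_s)) < 0)
    perror("TCP_KEEPIDLE");
#endif
#ifdef TCP_KEEPINTVL
  if(setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s)) < 0)
    perror("TCP_KEEPINTVL");
#endif
#ifdef TCP_KEEPCNT
  if(setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) < 0)
    perror("TCP_KEEPCNT");
#endif
}

int main(void)
{
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if(fd < 0) {
    perror("socket");
    return 1;
  }
  set_keepalive(fd, 60, 60, 9);
  close(fd);
  return 0;
}
```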
@@ -249,7 +329,7 @@ void Curl_sock_assign_addr(struct Curl_sockaddr_ex *dest,
dest->protocol = IPPROTO_UDP;
break;
}
- dest->addrlen = ai->ai_addrlen;
+ dest->addrlen = (unsigned int)ai->ai_addrlen;
if(dest->addrlen > sizeof(struct Curl_sockaddr_storage))
dest->addrlen = sizeof(struct Curl_sockaddr_storage);
@@ -314,7 +394,7 @@ CURLcode Curl_socket_open(struct Curl_easy *data,
struct Curl_sockaddr_ex dummy;
if(!addr)
- /* if the caller doesn't want info back, use a local temp copy */
+ /* if the caller does not want info back, use a local temp copy */
addr = &dummy;
Curl_sock_assign_addr(addr, ai, transport);
@@ -363,14 +443,14 @@ int Curl_socket_close(struct Curl_easy *data, struct connectdata *conn,
Buffer Size
The problem described in this knowledge-base is applied only to pre-Vista
- Windows. Following function trying to detect OS version and skips
+ Windows. The following function tries to detect the OS version and skips
SO_SNDBUF adjustment for Windows Vista and above.
*/
#define DETECT_OS_NONE 0
#define DETECT_OS_PREVISTA 1
#define DETECT_OS_VISTA_OR_LATER 2
-void Curl_sndbufset(curl_socket_t sockfd)
+void Curl_sndbuf_init(curl_socket_t sockfd)
{
int val = CURL_MAX_WRITE_SIZE + 32;
int curval = 0;
@@ -395,7 +475,83 @@ void Curl_sndbufset(curl_socket_t sockfd)
setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, (const char *)&val, sizeof(val));
}
-#endif
+#endif /* USE_WINSOCK */
+
+/*
+ * Curl_parse_interface()
+ *
+ * This is used to parse interface argument in the following formats.
+ * In all the examples, `host` can be an IP address or a hostname.
+ *
+ * <iface_or_host> - can be either an interface name or a host.
+ * if!<iface> - interface name.
+ * host!<host> - hostname.
+ * ifhost!<iface>!<host> - interface name and hostname.
+ *
+ * Parameters:
+ *
+ * input [in] - input string.
+ * len [in] - length of the input string.
+ * dev [in/out] - address where a pointer to newly allocated memory
+ * holding the interface-or-host will be stored upon
+ * completion.
+ * iface [in/out] - address where a pointer to newly allocated memory
+ * holding the interface will be stored upon completion.
+ * host [in/out] - address where a pointer to newly allocated memory
+ * holding the host will be stored upon completion.
+ *
+ * Returns CURLE_OK on success.
+ */
+CURLcode Curl_parse_interface(const char *input, size_t len,
+ char **dev, char **iface, char **host)
+{
+ static const char if_prefix[] = "if!";
+ static const char host_prefix[] = "host!";
+ static const char if_host_prefix[] = "ifhost!";
+
+ DEBUGASSERT(dev);
+ DEBUGASSERT(iface);
+ DEBUGASSERT(host);
+
+ if(strncmp(if_prefix, input, strlen(if_prefix)) == 0) {
+ input += strlen(if_prefix);
+ if(!*input)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ *iface = Curl_memdup0(input, len - strlen(if_prefix));
+ return *iface ? CURLE_OK : CURLE_OUT_OF_MEMORY;
+ }
+ if(strncmp(host_prefix, input, strlen(host_prefix)) == 0) {
+ input += strlen(host_prefix);
+ if(!*input)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ *host = Curl_memdup0(input, len - strlen(host_prefix));
+ return *host ? CURLE_OK : CURLE_OUT_OF_MEMORY;
+ }
+ if(strncmp(if_host_prefix, input, strlen(if_host_prefix)) == 0) {
+ const char *host_part;
+ input += strlen(if_host_prefix);
+ len -= strlen(if_host_prefix);
+ host_part = memchr(input, '!', len);
+ if(!host_part || !*(host_part + 1))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ *iface = Curl_memdup0(input, host_part - input);
+ if(!*iface)
+ return CURLE_OUT_OF_MEMORY;
+ ++host_part;
+ *host = Curl_memdup0(host_part, len - (host_part - input));
+ if(!*host) {
+ free(*iface);
+ *iface = NULL;
+ return CURLE_OUT_OF_MEMORY;
+ }
+ return CURLE_OK;
+ }
+
+ if(!*input)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ *dev = Curl_memdup0(input, len);
+ return *dev ? CURLE_OK : CURLE_OUT_OF_MEMORY;
+}
#ifndef CURL_DISABLE_BINDLOCAL
static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
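The interface-spec formats documented for the new Curl_parse_interface() above can be summarized with a small standalone parser. This is an illustration of the format only, not the libcurl function, which also allocates the result strings and returns proper error codes:

```c
#include <stdio.h>
#include <string.h>

/* splits "if!eth0", "host!example.com" or "ifhost!eth0!192.168.0.2";
 * anything without a recognized prefix is a device-or-host value.
 * Malformed "ifhost!" specs fall through here; libcurl rejects them. */
static void split_spec(const char *spec)
{
  const char *p;
  if(!strncmp(spec, "if!", 3))
    printf("%-26s -> iface='%s'\n", spec, spec + 3);
  else if(!strncmp(spec, "host!", 5))
    printf("%-26s -> host='%s'\n", spec, spec + 5);
  else if(!strncmp(spec, "ifhost!", 7) && (p = strchr(spec + 7, '!')) != NULL)
    printf("%-26s -> iface='%.*s' host='%s'\n",
           spec, (int)(p - (spec + 7)), spec + 7, p + 1);
  else
    printf("%-26s -> device-or-host='%s'\n", spec, spec);
}

int main(void)
{
  split_spec("eth0");
  split_spec("if!eth0");
  split_spec("host!example.com");
  split_spec("ifhost!eth0!192.168.0.2");
  return 0;
}
```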
@@ -415,6 +571,10 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
/* how many port numbers to try to bind to, increasing one at a time */
int portnum = data->set.localportrange;
const char *dev = data->set.str[STRING_DEVICE];
+ const char *iface_input = data->set.str[STRING_INTERFACE];
+ const char *host_input = data->set.str[STRING_BINDHOST];
+ const char *iface = iface_input ? iface_input : dev;
+ const char *host = host_input ? host_input : dev;
int error;
#ifdef IP_BIND_ADDRESS_NO_PORT
int on = 1;
@@ -426,83 +586,77 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
/*************************************************************
* Select device to bind socket to
*************************************************************/
- if(!dev && !port)
+ if(!iface && !host && !port)
/* no local kind of binding was requested */
return CURLE_OK;
memset(&sa, 0, sizeof(struct Curl_sockaddr_storage));
- if(dev && (strlen(dev)<255) ) {
+ if(iface && (strlen(iface)<255) ) {
char myhost[256] = "";
int done = 0; /* -1 for error, 1 for address found */
- bool is_interface = FALSE;
- bool is_host = FALSE;
- static const char *if_prefix = "if!";
- static const char *host_prefix = "host!";
-
- if(strncmp(if_prefix, dev, strlen(if_prefix)) == 0) {
- dev += strlen(if_prefix);
- is_interface = TRUE;
- }
- else if(strncmp(host_prefix, dev, strlen(host_prefix)) == 0) {
- dev += strlen(host_prefix);
- is_host = TRUE;
- }
+ if2ip_result_t if2ip_result = IF2IP_NOT_FOUND;
/* interface */
- if(!is_host) {
#ifdef SO_BINDTODEVICE
- /*
- * This binds the local socket to a particular interface. This will
- * force even requests to other local interfaces to go out the external
- * interface. Only bind to the interface when specified as interface,
- * not just as a hostname or ip address.
- *
- * The interface might be a VRF, eg: vrf-blue, which means it cannot be
- * converted to an IP address and would fail Curl_if2ip. Simply try to
- * use it straight away.
- */
- if(setsockopt(sockfd, SOL_SOCKET, SO_BINDTODEVICE,
- dev, (curl_socklen_t)strlen(dev) + 1) == 0) {
- /* This is often "errno 1, error: Operation not permitted" if you're
- * not running as root or another suitable privileged user. If it
- * succeeds it means the parameter was a valid interface and not an IP
- * address. Return immediately.
- */
- infof(data, "socket successfully bound to interface '%s'", dev);
+ /*
+ * This binds the local socket to a particular interface. This will
+ * force even requests to other local interfaces to go out the external
+ * interface. Only bind to the interface when specified as interface,
+ * not just as a hostname or ip address.
+ *
+ * The interface might be a VRF, eg: vrf-blue, which means it cannot be
+ * converted to an IP address and would fail Curl_if2ip. Simply try to
+ * use it straight away.
+ */
+ if(setsockopt(sockfd, SOL_SOCKET, SO_BINDTODEVICE,
+ iface, (curl_socklen_t)strlen(iface) + 1) == 0) {
+ /* This is often "errno 1, error: Operation not permitted" if you are
+ * not running as root or another suitable privileged user. If it
+ * succeeds it means the parameter was a valid interface and not an IP
+ * address. Return immediately.
+ */
+ if(!host_input) {
+ infof(data, "socket successfully bound to interface '%s'", iface);
return CURLE_OK;
}
+ }
#endif
-
- switch(Curl_if2ip(af,
+ if(!host_input) {
+ /* Discover IP from input device, then bind to it */
+ if2ip_result = Curl_if2ip(af,
#ifdef USE_IPV6
- scope, conn->scope_id,
-#endif
- dev, myhost, sizeof(myhost))) {
- case IF2IP_NOT_FOUND:
- if(is_interface) {
- /* Do not fall back to treating it as a host name */
- failf(data, "Couldn't bind to interface '%s'", dev);
- return CURLE_INTERFACE_FAILED;
- }
- break;
- case IF2IP_AF_NOT_SUPPORTED:
- /* Signal the caller to try another address family if available */
- return CURLE_UNSUPPORTED_PROTOCOL;
- case IF2IP_FOUND:
- is_interface = TRUE;
- /*
- * We now have the numerical IP address in the 'myhost' buffer
- */
- infof(data, "Local Interface %s is ip %s using address family %i",
- dev, myhost, af);
- done = 1;
- break;
- }
+ scope, conn->scope_id,
+#endif
+ iface, myhost, sizeof(myhost));
+ }
+ switch(if2ip_result) {
+ case IF2IP_NOT_FOUND:
+ if(iface_input && !host_input) {
+ /* Do not fall back to treating it as a hostname */
+ char buffer[STRERROR_LEN];
+ data->state.os_errno = error = SOCKERRNO;
+ failf(data, "Couldn't bind to interface '%s' with errno %d: %s",
+ iface, error, Curl_strerror(error, buffer, sizeof(buffer)));
+ return CURLE_INTERFACE_FAILED;
+ }
+ break;
+ case IF2IP_AF_NOT_SUPPORTED:
+ /* Signal the caller to try another address family if available */
+ return CURLE_UNSUPPORTED_PROTOCOL;
+ case IF2IP_FOUND:
+ /*
+ * We now have the numerical IP address in the 'myhost' buffer
+ */
+ host = myhost;
+ infof(data, "Local Interface %s is ip %s using address family %i",
+ iface, host, af);
+ done = 1;
+ break;
}
- if(!is_interface) {
+ if(!iface_input || host_input) {
/*
- * This was not an interface, resolve the name as a host name
+ * This was not an interface, resolve the name as a hostname
* or IP number
*
* Temporarily force name resolution to use only the address type
@@ -519,7 +673,7 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
conn->ip_version = CURL_IPRESOLVE_V6;
#endif
- rc = Curl_resolv(data, dev, 80, FALSE, &h);
+ rc = Curl_resolv(data, host, 80, FALSE, &h);
if(rc == CURLRESOLV_PENDING)
(void)Curl_resolver_wait_resolv(data, &h);
conn->ip_version = ipver;
@@ -528,7 +682,7 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
/* convert the resolved address, sizeof myhost >= INET_ADDRSTRLEN */
Curl_printable_address(h->addr, myhost, sizeof(myhost));
infof(data, "Name '%s' family %i resolved to '%s' family %i",
- dev, af, myhost, h->addr->ai_family);
+ host, af, myhost, h->addr->ai_family);
Curl_resolv_unlock(data, h);
if(af != h->addr->ai_family) {
/* bad IP version combo, signal the caller to try another address
@@ -562,7 +716,7 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
if(scope_ptr) {
/* The "myhost" string either comes from Curl_if2ip or from
Curl_printable_address. The latter returns only numeric scope
- IDs and the former returns none at all. So the scope ID, if
+ IDs and the former returns none at all. So the scope ID, if
present, is known to be numeric */
unsigned long scope_id = strtoul(scope_ptr, NULL, 10);
if(scope_id > UINT_MAX)
@@ -589,8 +743,11 @@ static CURLcode bindlocal(struct Curl_easy *data, struct connectdata *conn,
/* errorbuf is set false so failf will overwrite any message already in
the error buffer, so the user receives this error message instead of a
generic resolve error. */
+ char buffer[STRERROR_LEN];
data->state.errorbuf = FALSE;
- failf(data, "Couldn't bind to '%s'", dev);
+ data->state.os_errno = error = SOCKERRNO;
+ failf(data, "Couldn't bind to '%s' with errno %d: %s",
+ host, error, Curl_strerror(error, buffer, sizeof(buffer)));
return CURLE_INTERFACE_FAILED;
}
}
@@ -667,8 +824,8 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
* Gisle Vanem could reproduce the former problems with this function, but
* could avoid them by adding this SleepEx() call below:
*
- * "I don't have Rational Quantify, but the hint from his post was
- * ntdll::NtRemoveIoCompletion(). So I'd assume the SleepEx (or maybe
+ * "I do not have Rational Quantify, but the hint from his post was
+ * ntdll::NtRemoveIoCompletion(). I would assume the SleepEx (or maybe
* just Sleep(0) would be enough?) would release whatever
* mutex/critical-section the ntdll call is waiting on.
*
@@ -686,14 +843,14 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
if(0 != getsockopt(sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &errSize))
err = SOCKERRNO;
#ifdef _WIN32_WCE
- /* Old WinCE versions don't support SO_ERROR */
+ /* Old WinCE versions do not support SO_ERROR */
if(WSAENOPROTOOPT == err) {
SET_SOCKERRNO(0);
err = 0;
}
#endif
#if defined(EBADIOCTL) && defined(__minix)
- /* Minix 3.1.x doesn't support getsockopt on UDP sockets */
+ /* Minix 3.1.x does not support getsockopt on UDP sockets */
if(EBADIOCTL == err) {
SET_SOCKERRNO(0);
err = 0;
@@ -703,7 +860,7 @@ static bool verifyconnect(curl_socket_t sockfd, int *error)
/* we are connected, awesome! */
rc = TRUE;
else
- /* This wasn't a successful connect */
+ /* This was not a successful connect */
rc = FALSE;
if(error)
*error = err;
@@ -765,11 +922,14 @@ struct cf_socket_ctx {
int transport;
struct Curl_sockaddr_ex addr; /* address to connect to */
curl_socket_t sock; /* current attempt socket */
- struct bufq recvbuf; /* used when `buffer_recv` is set */
struct ip_quadruple ip; /* The IP quadruple 2x(addr+port) */
struct curltime started_at; /* when socket was created */
struct curltime connected_at; /* when socket connected/got first byte */
struct curltime first_byte_at; /* when first byte was recvd */
+#ifdef USE_WINSOCK
+ struct curltime last_sndbuf_query_at; /* when SO_SNDBUF last queried */
+ ULONG sndbuf_size; /* the last set SO_SNDBUF size */
+#endif
int error; /* errno of last failure or 0 */
#ifdef DEBUGBUILD
int wblock_percent; /* percent of writes doing EAGAIN */
@@ -781,7 +941,6 @@ struct cf_socket_ctx {
BIT(accepted); /* socket was accepted, not connected */
BIT(sock_connected); /* socket is "connected", e.g. in UDP */
BIT(active);
- BIT(buffer_recv);
};
static void cf_socket_ctx_init(struct cf_socket_ctx *ctx,
@@ -792,7 +951,6 @@ static void cf_socket_ctx_init(struct cf_socket_ctx *ctx,
ctx->sock = CURL_SOCKET_BAD;
ctx->transport = transport;
Curl_sock_assign_addr(&ctx->addr, ai, transport);
- Curl_bufq_init(&ctx->recvbuf, NW_RECV_CHUNK_SIZE, NW_RECV_CHUNKS);
#ifdef DEBUGBUILD
{
char *p = getenv("CURL_DBG_SOCK_WBLOCK");
@@ -823,56 +981,6 @@ static void cf_socket_ctx_init(struct cf_socket_ctx *ctx,
#endif
}
-struct reader_ctx {
- struct Curl_cfilter *cf;
- struct Curl_easy *data;
-};
-
-static ssize_t nw_in_read(void *reader_ctx,
- unsigned char *buf, size_t len,
- CURLcode *err)
-{
- struct reader_ctx *rctx = reader_ctx;
- struct cf_socket_ctx *ctx = rctx->cf->ctx;
- ssize_t nread;
-
- *err = CURLE_OK;
- nread = sread(ctx->sock, buf, len);
-
- if(-1 == nread) {
- int sockerr = SOCKERRNO;
-
- if(
-#ifdef WSAEWOULDBLOCK
- /* This is how Windows does it */
- (WSAEWOULDBLOCK == sockerr)
-#else
- /* errno may be EWOULDBLOCK or on some systems EAGAIN when it returned
- due to its inability to send off data without blocking. We therefore
- treat both error codes the same here */
- (EWOULDBLOCK == sockerr) || (EAGAIN == sockerr) || (EINTR == sockerr)
-#endif
- ) {
- /* this is just a case of EWOULDBLOCK */
- *err = CURLE_AGAIN;
- nread = -1;
- }
- else {
- char buffer[STRERROR_LEN];
-
- failf(rctx->data, "Recv failure: %s",
- Curl_strerror(sockerr, buffer, sizeof(buffer)));
- rctx->data->state.os_errno = sockerr;
- *err = CURLE_RECV_ERROR;
- nread = -1;
- }
- }
- CURL_TRC_CF(rctx->data, rctx->cf, "nw_in_read(len=%zu, fd=%"
- CURL_FORMAT_SOCKET_T ") -> %d, err=%d",
- len, ctx->sock, (int)nread, *err);
- return nread;
-}
-
static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_socket_ctx *ctx = cf->ctx;
@@ -886,9 +994,7 @@ static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data)
ctx->sock = CURL_SOCKET_BAD;
if(ctx->active && cf->sockindex == FIRSTSOCKET)
cf->conn->remote_addr = NULL;
- Curl_bufq_reset(&ctx->recvbuf);
ctx->active = FALSE;
- ctx->buffer_recv = FALSE;
memset(&ctx->started_at, 0, sizeof(ctx->started_at));
memset(&ctx->connected_at, 0, sizeof(ctx->connected_at));
}
@@ -896,13 +1002,35 @@ static void cf_socket_close(struct Curl_cfilter *cf, struct Curl_easy *data)
cf->connected = FALSE;
}
+static CURLcode cf_socket_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool *done)
+{
+ if(cf->connected) {
+ struct cf_socket_ctx *ctx = cf->ctx;
+
+ CURL_TRC_CF(data, cf, "cf_socket_shutdown(%" CURL_FORMAT_SOCKET_T
+ ")", ctx->sock);
+ /* On TCP, and when the socket looks well and non-blocking mode
+ * can be enabled, receive dangling bytes before close to avoid
+ * entering RST states unnecessarily. */
+ if(ctx->sock != CURL_SOCKET_BAD &&
+ ctx->transport == TRNSPRT_TCP &&
+ (curlx_nonblock(ctx->sock, TRUE) >= 0)) {
+ unsigned char buf[1024];
+ (void)sread(ctx->sock, buf, sizeof(buf));
+ }
+ }
+ *done = TRUE;
+ return CURLE_OK;
+}
+
static void cf_socket_destroy(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_socket_ctx *ctx = cf->ctx;
cf_socket_close(cf, data);
CURL_TRC_CF(data, cf, "destroy");
- Curl_bufq_free(&ctx->recvbuf);
free(ctx);
cf->ctx = NULL;
}
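The idea behind the new cf_socket_shutdown() above is to read away any late bytes on a TCP socket before closing it, so the close is less likely to turn into an RST. In isolation that looks like the following plain POSIX sketch, using a socketpair to stand in for a real connection (the filter itself does a single bounded read rather than a loop):

```c
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

static void drain_and_close(int fd)
{
  unsigned char sink[1024];
  int flags = fcntl(fd, F_GETFL, 0);
  /* best effort: switch to non-blocking and drain whatever is pending */
  if(flags >= 0 && fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0) {
    while(recv(fd, sink, sizeof(sink), 0) > 0)
      ;
  }
  close(fd);
}

int main(void)
{
  int sv[2];
  if(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
    send(sv[1], "left-over", 9, 0);  /* pretend the peer sent late data */
    drain_and_close(sv[0]);
    close(sv[1]);
  }
  return 0;
}
```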
@@ -949,7 +1077,7 @@ static CURLcode set_remote_ip(struct Curl_cfilter *cf,
struct cf_socket_ctx *ctx = cf->ctx;
/* store remote address and port used in this connection attempt */
- if(!Curl_addr2string(&ctx->addr.sa_addr, ctx->addr.addrlen,
+ if(!Curl_addr2string(&ctx->addr.sa_addr, (curl_socklen_t)ctx->addr.addrlen,
ctx->ip.remote_ip, &ctx->ip.remote_port)) {
char buffer[STRERROR_LEN];
@@ -974,7 +1102,20 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf,
(void)data;
DEBUGASSERT(ctx->sock == CURL_SOCKET_BAD);
ctx->started_at = Curl_now();
+#ifdef SOCK_NONBLOCK
+ /* Do not tuck SOCK_NONBLOCK into socktype when opensocket callback is set
+ * because we would not know how socketype is about to be used in the
+ * callback, SOCK_NONBLOCK might get factored out before calling socket().
+ */
+ if(!data->set.fopensocket)
+ ctx->addr.socktype |= SOCK_NONBLOCK;
+#endif
result = socket_open(data, &ctx->addr, &ctx->sock);
+#ifdef SOCK_NONBLOCK
+ /* Restore the socktype after the socket is created. */
+ if(!data->set.fopensocket)
+ ctx->addr.socktype &= ~SOCK_NONBLOCK;
+#endif
if(result)
goto out;
@@ -1004,7 +1145,7 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf,
nosigpipe(data, ctx->sock);
- Curl_sndbufset(ctx->sock);
+ Curl_sndbuf_init(ctx->sock);
if(is_tcp && data->set.tcp_keepalive)
tcpkeepalive(data, ctx->sock);
@@ -1045,8 +1186,27 @@ static CURLcode cf_socket_open(struct Curl_cfilter *cf,
}
#endif
- /* set socket non-blocking */
- (void)curlx_nonblock(ctx->sock, TRUE);
+#ifndef SOCK_NONBLOCK
+ /* Set socket non-blocking, must be a non-blocking socket for
+ * a non-blocking connect. */
+ error = curlx_nonblock(ctx->sock, TRUE);
+ if(error < 0) {
+ result = CURLE_UNSUPPORTED_PROTOCOL;
+ ctx->error = SOCKERRNO;
+ goto out;
+ }
+#else
+ if(data->set.fopensocket) {
+ /* Set socket non-blocking, must be a non-blocking socket for
+ * a non-blocking connect. */
+ error = curlx_nonblock(ctx->sock, TRUE);
+ if(error < 0) {
+ result = CURLE_UNSUPPORTED_PROTOCOL;
+ ctx->error = SOCKERRNO;
+ goto out;
+ }
+ }
+#endif
ctx->sock_connected = (ctx->addr.socktype != SOCK_DGRAM);
out:
if(result) {
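Requesting the non-blocking flag at socket() creation time, as the hunk above does when no opensocket callback is installed, saves the separate fcntl()/ioctl() round-trip on platforms that support SOCK_NONBLOCK. A standalone sketch of the same trade-off:

```c
#include <sys/socket.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int open_nonblocking_tcp(void)
{
  int fd;
#ifdef SOCK_NONBLOCK
  /* Linux and the BSDs: ask for non-blocking in the socket() call itself */
  fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
#else
  /* everywhere else: create first, then flip O_NONBLOCK */
  fd = socket(AF_INET, SOCK_STREAM, 0);
  if(fd >= 0) {
    int flags = fcntl(fd, F_GETFL, 0);
    if(flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
      close(fd);
      fd = -1;
    }
  }
#endif
  return fd;
}

int main(void)
{
  int fd = open_nonblocking_tcp();
  printf("non-blocking socket fd: %d\n", fd);
  if(fd >= 0)
    close(fd);
  return 0;
}
```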
@@ -1114,7 +1274,8 @@ static int do_connect(struct Curl_cfilter *cf, struct Curl_easy *data,
#endif
}
else {
- rc = connect(ctx->sock, &ctx->addr.sa_addr, ctx->addr.addrlen);
+ rc = connect(ctx->sock, &ctx->addr.sa_addr,
+ (curl_socklen_t)ctx->addr.addrlen);
}
return rc;
}
@@ -1257,13 +1418,36 @@ static bool cf_socket_data_pending(struct Curl_cfilter *cf,
int readable;
(void)data;
- if(!Curl_bufq_is_empty(&ctx->recvbuf))
- return TRUE;
-
readable = SOCKET_READABLE(ctx->sock, 0);
return (readable > 0 && (readable & CURL_CSELECT_IN));
}
+#ifdef USE_WINSOCK
+
+#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
+#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
+#endif
+
+static void win_update_sndbuf_size(struct cf_socket_ctx *ctx)
+{
+ ULONG ideal;
+ DWORD ideallen;
+ struct curltime n = Curl_now();
+
+ if(Curl_timediff(n, ctx->last_sndbuf_query_at) > 1000) {
+ if(!WSAIoctl(ctx->sock, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
+ &ideal, sizeof(ideal), &ideallen, 0, 0) &&
+ ideal != ctx->sndbuf_size &&
+ !setsockopt(ctx->sock, SOL_SOCKET, SO_SNDBUF,
+ (const char *)&ideal, sizeof(ideal))) {
+ ctx->sndbuf_size = ideal;
+ }
+ ctx->last_sndbuf_query_at = n;
+ }
+}
+
+#endif /* USE_WINSOCK */
+
static ssize_t cf_socket_send(struct Curl_cfilter *cf, struct Curl_easy *data,
const void *buf, size_t len, CURLcode *err)
{
@@ -1336,6 +1520,11 @@ static ssize_t cf_socket_send(struct Curl_cfilter *cf, struct Curl_easy *data,
}
}
+#if defined(USE_WINSOCK)
+ if(!*err)
+ win_update_sndbuf_size(ctx);
+#endif
+
CURL_TRC_CF(data, cf, "send(len=%zu) -> %d, err=%d",
orig_len, (int)nwritten, *err);
cf->conn->sock[cf->sockindex] = fdsave;
@@ -1346,14 +1535,10 @@ static ssize_t cf_socket_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
char *buf, size_t len, CURLcode *err)
{
struct cf_socket_ctx *ctx = cf->ctx;
- curl_socket_t fdsave;
ssize_t nread;
*err = CURLE_OK;
- fdsave = cf->conn->sock[cf->sockindex];
- cf->conn->sock[cf->sockindex] = ctx->sock;
-
#ifdef DEBUGBUILD
/* simulate network blocking/partial reads */
if(cf->cft != &Curl_cft_udp && ctx->rblock_percent > 0) {
@@ -1362,9 +1547,7 @@ static ssize_t cf_socket_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
if(c >= ((100-ctx->rblock_percent)*256/100)) {
CURL_TRC_CF(data, cf, "recv(len=%zu) SIMULATE EWOULDBLOCK", len);
*err = CURLE_AGAIN;
- nread = -1;
- cf->conn->sock[cf->sockindex] = fdsave;
- return nread;
+ return -1;
}
}
if(cf->cft != &Curl_cft_udp && ctx->recv_max && ctx->recv_max < len) {
@@ -1375,54 +1558,42 @@ static ssize_t cf_socket_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
}
#endif
- if(ctx->buffer_recv && !Curl_bufq_is_empty(&ctx->recvbuf)) {
- CURL_TRC_CF(data, cf, "recv from buffer");
- nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
- }
- else {
- struct reader_ctx rctx;
-
- rctx.cf = cf;
- rctx.data = data;
-
- /* "small" reads may trigger filling our buffer, "large" reads
- * are probably not worth the additional copy */
- if(ctx->buffer_recv && len < NW_SMALL_READS) {
- ssize_t nwritten;
- nwritten = Curl_bufq_slurp(&ctx->recvbuf, nw_in_read, &rctx, err);
- if(nwritten < 0 && !Curl_bufq_is_empty(&ctx->recvbuf)) {
- /* we have a partial read with an error. need to deliver
- * what we got, return the error later. */
- CURL_TRC_CF(data, cf, "partial read: empty buffer first");
- nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
- }
- else if(nwritten < 0) {
- nread = -1;
- goto out;
- }
- else if(nwritten == 0) {
- /* eof */
- *err = CURLE_OK;
- nread = 0;
- }
- else {
- CURL_TRC_CF(data, cf, "buffered %zd additional bytes", nwritten);
- nread = Curl_bufq_read(&ctx->recvbuf, (unsigned char *)buf, len, err);
- }
+ *err = CURLE_OK;
+ nread = sread(ctx->sock, buf, len);
+
+ if(-1 == nread) {
+ int sockerr = SOCKERRNO;
+
+ if(
+#ifdef WSAEWOULDBLOCK
+ /* This is how Windows does it */
+ (WSAEWOULDBLOCK == sockerr)
+#else
+ /* errno may be EWOULDBLOCK or on some systems EAGAIN when it returned
+ due to its inability to send off data without blocking. We therefore
+ treat both error codes the same here */
+ (EWOULDBLOCK == sockerr) || (EAGAIN == sockerr) || (EINTR == sockerr)
+#endif
+ ) {
+ /* this is just a case of EWOULDBLOCK */
+ *err = CURLE_AGAIN;
}
else {
- nread = nw_in_read(&rctx, (unsigned char *)buf, len, err);
+ char buffer[STRERROR_LEN];
+
+ failf(data, "Recv failure: %s",
+ Curl_strerror(sockerr, buffer, sizeof(buffer)));
+ data->state.os_errno = sockerr;
+ *err = CURLE_RECV_ERROR;
}
}
-out:
CURL_TRC_CF(data, cf, "recv(len=%zu) -> %d, err=%d", len, (int)nread,
*err);
if(nread > 0 && !ctx->got_first_byte) {
ctx->first_byte_at = Curl_now();
ctx->got_first_byte = TRUE;
}
- cf->conn->sock[cf->sockindex] = fdsave;
return nread;
}
@@ -1444,11 +1615,6 @@ static void cf_socket_active(struct Curl_cfilter *cf, struct Curl_easy *data)
cf->conn->bits.ipv6 = (ctx->addr.family == AF_INET6)? TRUE : FALSE;
#endif
Curl_persistconninfo(data, cf->conn, &ctx->ip);
- /* buffering is currently disabled by default because we have stalls
- * in parallel transfers where not all buffered data is consumed and no
- * socket events happen.
- */
- ctx->buffer_recv = FALSE;
}
ctx->active = TRUE;
}
@@ -1564,6 +1730,7 @@ struct Curl_cftype Curl_cft_tcp = {
cf_socket_destroy,
cf_tcp_connect,
cf_socket_close,
+ cf_socket_shutdown,
cf_socket_get_host,
cf_socket_adjust_pollset,
cf_socket_data_pending,
@@ -1608,25 +1775,23 @@ out:
}
static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+ struct Curl_easy *data)
{
struct cf_socket_ctx *ctx = cf->ctx;
int rc;
+ int one = 1;
+
+ (void)one;
/* QUIC needs a connected socket, nonblocking */
DEBUGASSERT(ctx->sock != CURL_SOCKET_BAD);
-#if defined(__APPLE__) && defined(USE_OPENSSL_QUIC)
- (void)rc;
- /* On macOS OpenSSL QUIC fails on connected sockets.
- * see: <https://github.com/openssl/openssl/issues/23251> */
-#else
- rc = connect(ctx->sock, &ctx->addr.sa_addr, ctx->addr.addrlen);
+ rc = connect(ctx->sock, &ctx->addr.sa_addr,
+ (curl_socklen_t)ctx->addr.addrlen);
if(-1 == rc) {
return socket_connect_result(data, ctx->ip.remote_ip, SOCKERRNO);
}
ctx->sock_connected = TRUE;
-#endif
set_local_ip(cf, data);
CURL_TRC_CF(data, cf, "%s socket %" CURL_FORMAT_SOCKET_T
" connected: [%s:%d] -> [%s:%d]",
@@ -1634,7 +1799,11 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf,
ctx->sock, ctx->ip.local_ip, ctx->ip.local_port,
ctx->ip.remote_ip, ctx->ip.remote_port);
- (void)curlx_nonblock(ctx->sock, TRUE);
+ /* Currently, cf->ctx->sock is always non-blocking because the only
+ * caller to cf_udp_setup_quic() is cf_udp_connect() that passes the
+ * non-blocking socket created by cf_socket_open() to it. Thus, we
+ * do not need to call curlx_nonblock() in cf_udp_setup_quic() anymore.
+ */
switch(ctx->addr.family) {
#if defined(__linux__) && defined(IP_MTU_DISCOVER)
case AF_INET: {
@@ -1653,6 +1822,14 @@ static CURLcode cf_udp_setup_quic(struct Curl_cfilter *cf,
}
#endif
}
+
+#if defined(__linux__) && defined(UDP_GRO) && \
+ (defined(HAVE_SENDMMSG) || defined(HAVE_SENDMSG)) && \
+ ((defined(USE_NGTCP2) && defined(USE_NGHTTP3)) || defined(USE_QUICHE))
+ (void)setsockopt(ctx->sock, IPPROTO_UDP, UDP_GRO, &one,
+ (socklen_t)sizeof(one));
+#endif
+
return CURLE_OK;
}
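The UDP generic-receive-offload opt-in added above boils down to a single guarded setsockopt(). A standalone sketch, assuming a Linux system whose headers define UDP_GRO; on kernels without support (roughly pre-5.0) the call simply fails, which is harmless:

```c
#include <sys/socket.h>
#include <netinet/in.h>
#ifdef __linux__
#include <netinet/udp.h>   /* defines UDP_GRO on recent glibc */
#endif
#include <stdio.h>
#include <unistd.h>

int main(void)
{
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if(fd < 0)
    return 1;
#ifdef UDP_GRO
  {
    int one = 1;
    if(setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one)) < 0)
      perror("UDP_GRO");  /* unsupported kernel: proceed without GRO */
  }
#endif
  close(fd);
  return 0;
}
```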
@@ -1702,6 +1879,7 @@ struct Curl_cftype Curl_cft_udp = {
cf_socket_destroy,
cf_udp_connect,
cf_socket_close,
+ cf_socket_shutdown,
cf_socket_get_host,
cf_socket_adjust_pollset,
cf_socket_data_pending,
@@ -1753,6 +1931,7 @@ struct Curl_cftype Curl_cft_unix = {
cf_socket_destroy,
cf_tcp_connect,
cf_socket_close,
+ cf_socket_shutdown,
cf_socket_get_host,
cf_socket_adjust_pollset,
cf_socket_data_pending,
@@ -1817,6 +1996,7 @@ struct Curl_cftype Curl_cft_tcp_accept = {
cf_socket_destroy,
cf_tcp_accept_connect,
cf_socket_close,
+ cf_socket_shutdown,
cf_socket_get_host, /* TODO: not accurate */
cf_socket_adjust_pollset,
cf_socket_data_pending,
diff --git a/libs/libcurl/src/cf-socket.h b/libs/libcurl/src/cf-socket.h
index 2e175d1a30..65a5435040 100644
--- a/libs/libcurl/src/cf-socket.h
+++ b/libs/libcurl/src/cf-socket.h
@@ -54,6 +54,11 @@ struct Curl_sockaddr_ex {
};
#define sa_addr _sa_ex_u.addr
+/*
+ * Parse interface option, and return the interface name and the host part.
+*/
+CURLcode Curl_parse_interface(const char *input, size_t len,
+ char **dev, char **iface, char **host);
/*
* Create a socket based on info from 'conn' and 'ai'.
@@ -81,9 +86,9 @@ int Curl_socket_close(struct Curl_easy *data, struct connectdata *conn,
Buffer Size
*/
-void Curl_sndbufset(curl_socket_t sockfd);
+void Curl_sndbuf_init(curl_socket_t sockfd);
#else
-#define Curl_sndbufset(y) Curl_nop_stmt
+#define Curl_sndbuf_init(y) Curl_nop_stmt
#endif
/**
diff --git a/libs/libcurl/src/cfilters.c b/libs/libcurl/src/cfilters.c
index 4bb958cf3a..d10c6c1ed0 100644
--- a/libs/libcurl/src/cfilters.c
+++ b/libs/libcurl/src/cfilters.c
@@ -45,7 +45,7 @@
#define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
#endif
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
/* used by unit2600.c */
void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
@@ -55,6 +55,15 @@ void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data)
}
#endif
+CURLcode Curl_cf_def_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ (void)cf;
+ (void)data;
+ *done = TRUE;
+ return CURLE_OK;
+}
+
static void conn_report_connect_stats(struct Curl_easy *data,
struct connectdata *conn);
@@ -166,6 +175,61 @@ void Curl_conn_close(struct Curl_easy *data, int index)
if(cf) {
cf->cft->do_close(cf, data);
}
+ Curl_shutdown_clear(data, index);
+}
+
+CURLcode Curl_conn_shutdown(struct Curl_easy *data, int sockindex, bool *done)
+{
+ struct Curl_cfilter *cf;
+ CURLcode result = CURLE_OK;
+ timediff_t timeout_ms;
+ struct curltime now;
+
+ DEBUGASSERT(data->conn);
+ /* Get the first connected filter that is not shut down already. */
+ cf = data->conn->cfilter[sockindex];
+ while(cf && (!cf->connected || cf->shutdown))
+ cf = cf->next;
+
+ if(!cf) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ *done = FALSE;
+ now = Curl_now();
+ if(!Curl_shutdown_started(data, sockindex)) {
+ DEBUGF(infof(data, "shutdown start on%s connection",
+ sockindex? " secondary" : ""));
+ Curl_shutdown_start(data, sockindex, &now);
+ }
+ else {
+ timeout_ms = Curl_shutdown_timeleft(data->conn, sockindex, &now);
+ if(timeout_ms < 0) {
+ failf(data, "SSL shutdown timeout");
+ return CURLE_OPERATION_TIMEDOUT;
+ }
+ }
+
+ while(cf) {
+ if(!cf->shutdown) {
+ bool cfdone = FALSE;
+ result = cf->cft->do_shutdown(cf, data, &cfdone);
+ if(result) {
+ CURL_TRC_CF(data, cf, "shut down failed with %d", result);
+ return result;
+ }
+ else if(!cfdone) {
+ CURL_TRC_CF(data, cf, "shut down not done yet");
+ return CURLE_OK;
+ }
+ CURL_TRC_CF(data, cf, "shut down successfully");
+ cf->shutdown = TRUE;
+ }
+ cf = cf->next;
+ }
+ *done = (!result);
+ return result;
}
ssize_t Curl_cf_recv(struct Curl_easy *data, int num, char *buf,
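Curl_conn_shutdown() above only records when the shutdown started and then, on each later call, checks whether the time budget has run out. That bookkeeping reduces to a monotonic-clock timestamp and a subtraction; a sketch with a made-up 2000 ms budget standing in for the configured shutdown timeout:

```c
#include <stdio.h>
#include <time.h>

static long elapsed_ms(const struct timespec *start)
{
  struct timespec now;
  clock_gettime(CLOCK_MONOTONIC, &now);
  return (now.tv_sec - start->tv_sec) * 1000L +
         (now.tv_nsec - start->tv_nsec) / 1000000L;
}

int main(void)
{
  const long budget_ms = 2000;  /* placeholder for the shutdown timeout */
  struct timespec started;
  long left;

  clock_gettime(CLOCK_MONOTONIC, &started);
  /* ... a non-blocking shutdown attempt would happen here ... */
  left = budget_ms - elapsed_ms(&started);

  if(left < 0)
    printf("shutdown timed out\n");
  else
    printf("%ld ms left in the shutdown budget\n", left);
  return 0;
}
```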
@@ -345,8 +409,10 @@ CURLcode Curl_conn_connect(struct Curl_easy *data,
cf = data->conn->cfilter[sockindex];
DEBUGASSERT(cf);
- if(!cf)
+ if(!cf) {
+ *done = FALSE;
return CURLE_FAILED_INIT;
+ }
*done = cf->connected;
if(!*done) {
@@ -442,6 +508,9 @@ void Curl_conn_cf_adjust_pollset(struct Curl_cfilter *cf,
/* Get the lowest not-connected filter, if there are any */
while(cf && !cf->connected && cf->next && !cf->next->connected)
cf = cf->next;
+ /* Skip all filters that have already shut down */
+ while(cf && cf->shutdown)
+ cf = cf->next;
/* From there on, give all filters a chance to adjust the pollset.
* Lower filters are called later, so they may override */
while(cf) {
@@ -462,6 +531,42 @@ void Curl_conn_adjust_pollset(struct Curl_easy *data,
}
}
+int Curl_conn_cf_poll(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ timediff_t timeout_ms)
+{
+ struct easy_pollset ps;
+ struct pollfd pfds[MAX_SOCKSPEREASYHANDLE];
+ unsigned int i, npfds = 0;
+
+ DEBUGASSERT(cf);
+ DEBUGASSERT(data);
+ DEBUGASSERT(data->conn);
+ memset(&ps, 0, sizeof(ps));
+ memset(pfds, 0, sizeof(pfds));
+
+ Curl_conn_cf_adjust_pollset(cf, data, &ps);
+ DEBUGASSERT(ps.num <= MAX_SOCKSPEREASYHANDLE);
+ for(i = 0; i < ps.num; ++i) {
+ short events = 0;
+ if(ps.actions[i] & CURL_POLL_IN) {
+ events |= POLLIN;
+ }
+ if(ps.actions[i] & CURL_POLL_OUT) {
+ events |= POLLOUT;
+ }
+ if(events) {
+ pfds[npfds].fd = ps.sockets[i];
+ pfds[npfds].events = events;
+ ++npfds;
+ }
+ }
+
+ if(!npfds)
+ DEBUGF(infof(data, "no sockets to poll!"));
+ return Curl_poll(pfds, npfds, timeout_ms);
+}
+
void Curl_conn_get_host(struct Curl_easy *data, int sockindex,
const char **phost, const char **pdisplay_host,
int *pport)
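Curl_conn_cf_poll() above converts the filters' read/write interest into pollfd entries and waits. The underlying poll() usage, reduced to a single descriptor (stdin here, purely to keep the sketch self-contained):

```c
#include <poll.h>
#include <stdio.h>

int main(void)
{
  struct pollfd pfd[1];
  int rc;

  pfd[0].fd = 0;            /* stdin */
  pfd[0].events = POLLIN;   /* interested in readability only */
  pfd[0].revents = 0;

  rc = poll(pfd, 1, 1000);  /* 0 = timeout, <0 = error, >0 = ready count */
  if(rc > 0 && (pfd[0].revents & POLLIN))
    printf("stdin readable\n");
  else if(rc == 0)
    printf("timed out\n");
  else if(rc < 0)
    perror("poll");
  return 0;
}
```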
@@ -718,7 +823,7 @@ CURLcode Curl_conn_send(struct Curl_easy *data, int sockindex,
DEBUGASSERT(data);
DEBUGASSERT(data->conn);
conn = data->conn;
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
{
/* Allow debug builds to override this logic to force short sends
*/
diff --git a/libs/libcurl/src/cfilters.h b/libs/libcurl/src/cfilters.h
index 040c9e88ee..87b2f746a9 100644
--- a/libs/libcurl/src/cfilters.h
+++ b/libs/libcurl/src/cfilters.h
@@ -24,6 +24,7 @@
*
***************************************************************************/
+#include "timediff.h"
struct Curl_cfilter;
struct Curl_easy;
@@ -36,9 +37,17 @@ struct connectdata;
typedef void Curl_cft_destroy_this(struct Curl_cfilter *cf,
struct Curl_easy *data);
+/* Callback to close the connection immediately. */
typedef void Curl_cft_close(struct Curl_cfilter *cf,
struct Curl_easy *data);
+/* Callback to close the connection filter gracefully, non-blocking.
+ * Implementations MUST NOT chain calls to cf->next.
+ */
+typedef CURLcode Curl_cft_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool *done);
+
typedef CURLcode Curl_cft_connect(struct Curl_cfilter *cf,
struct Curl_easy *data,
bool blocking, bool *done);
@@ -76,10 +85,10 @@ struct easy_pollset;
* the pollset. Filters, whose filter "below" is not connected, should
* also do no adjustments.
*
- * Examples: a TLS handshake, while ongoing, might remove POLL_IN
- * when it needs to write, or vice versa. A HTTP/2 filter might remove
- * POLL_OUT when a stream window is exhausted and a WINDOW_UPDATE needs
- * to be received first and add instead POLL_IN.
+ * Examples: a TLS handshake, while ongoing, might remove POLL_IN when it
+ * needs to write, or vice versa. An HTTP/2 filter might remove POLL_OUT when
+ * a stream window is exhausted and a WINDOW_UPDATE needs to be received first
+ * and add instead POLL_IN.
*
* @param cf the filter to ask
* @param data the easy handle the pollset is about
@@ -194,6 +203,7 @@ struct Curl_cftype {
Curl_cft_destroy_this *destroy; /* destroy resources of this cf */
Curl_cft_connect *do_connect; /* establish connection */
Curl_cft_close *do_close; /* close conn */
+ Curl_cft_shutdown *do_shutdown; /* shutdown conn */
Curl_cft_get_host *get_host; /* host filter talks to */
Curl_cft_adjust_pollset *adjust_pollset; /* adjust transfer poll set */
Curl_cft_data_pending *has_data_pending;/* conn has data pending */
@@ -213,6 +223,7 @@ struct Curl_cfilter {
struct connectdata *conn; /* the connection this filter belongs to */
int sockindex; /* the index the filter is installed at */
BIT(connected); /* != 0 iff this filter is connected */
+ BIT(shutdown); /* != 0 iff this filter has shut down */
};
/* Default implementations for the type functions, implementing nop. */
@@ -244,6 +255,8 @@ CURLcode Curl_cf_def_conn_keep_alive(struct Curl_cfilter *cf,
CURLcode Curl_cf_def_query(struct Curl_cfilter *cf,
struct Curl_easy *data,
int query, int *pres1, void *pres2);
+CURLcode Curl_cf_def_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done);
/**
* Create a new filter instance, unattached to the filter chain.
@@ -372,6 +385,13 @@ bool Curl_conn_is_multiplex(struct connectdata *conn, int sockindex);
void Curl_conn_close(struct Curl_easy *data, int sockindex);
/**
+ * Shutdown the connection at `sockindex` non-blocking, using timeout
+ * from `data->set.shutdowntimeout`, default DEFAULT_SHUTDOWN_TIMEOUT_MS.
+ * Will return CURLE_OK and *done == FALSE if not finished.
+ */
+CURLcode Curl_conn_shutdown(struct Curl_easy *data, int sockindex, bool *done);
+
+/**
* Return if data is pending in some connection filter at chain
* `sockindex` for connection `data->conn`.
*/
@@ -403,6 +423,15 @@ void Curl_conn_adjust_pollset(struct Curl_easy *data,
struct easy_pollset *ps);
/**
+ * Curl_poll() the filter chain at `cf` with timeout `timeout_ms`.
+ * Returns 0 on timeout, negative on error or number of sockets
+ * with requested poll events.
+ */
+int Curl_conn_cf_poll(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ timediff_t timeout_ms);
+
+/**
* Receive data through the filter chain at `sockindex` for connection
* `data->conn`. Copy at most `len` bytes into `buf`. Return the
* actual number of bytes copied or a negative value on error.
@@ -486,7 +515,9 @@ CURLcode Curl_conn_keep_alive(struct Curl_easy *data,
struct connectdata *conn,
int sockindex);
+#ifdef UNITTESTS
void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data);
+#endif
void Curl_conn_get_host(struct Curl_easy *data, int sockindex,
const char **phost, const char **pdisplay_host,
int *pport);
diff --git a/libs/libcurl/src/config-os400.h b/libs/libcurl/src/config-os400.h
index d33185a390..6e410ac7ce 100644
--- a/libs/libcurl/src/config-os400.h
+++ b/libs/libcurl/src/config-os400.h
@@ -107,9 +107,6 @@
/* Define if you have the <io.h> header file. */
#undef HAVE_IO_H
-/* Define if you have the `socket' library (-lsocket). */
-#undef HAVE_LIBSOCKET
-
/* Define if you have GSS API. */
#define HAVE_GSSAPI
diff --git a/libs/libcurl/src/config-riscos.h b/libs/libcurl/src/config-riscos.h
index fd3f67e59f..f537b58c2a 100644
--- a/libs/libcurl/src/config-riscos.h
+++ b/libs/libcurl/src/config-riscos.h
@@ -108,9 +108,6 @@
/* Define if you have the <io.h> header file. */
#undef HAVE_IO_H
-/* Define if you have the `socket' library (-lsocket). */
-#undef HAVE_LIBSOCKET
-
/* Define if you need the malloc.h header file even with stdlib.h */
/* #define NEED_MALLOC_H 1 */
diff --git a/libs/libcurl/src/config-win32.h b/libs/libcurl/src/config-win32.h
index 6b61216c9b..17924e3dec 100644
--- a/libs/libcurl/src/config-win32.h
+++ b/libs/libcurl/src/config-win32.h
@@ -158,10 +158,6 @@
/* Define if you have the socket function. */
#define HAVE_SOCKET 1
-/* Define if libSSH2 is in use */
-#define USE_LIBSSH2 1
-#define HAVE_LIBSSH2_H 1
-
/* Define if you have the strcasecmp function. */
#if defined(__MINGW32__)
#define HAVE_STRCASECMP 1
@@ -478,9 +474,6 @@ Vista
#define USE_WIN32_LDAP 1
#endif
-/* if SSL is enabled */
-#define USE_OPENSSL 1
-
/* Define to use the Windows crypto library. */
#if !defined(CURL_WINDOWS_APP)
#define USE_WIN32_CRYPTO
diff --git a/libs/libcurl/src/conncache.c b/libs/libcurl/src/conncache.c
index 00bb601cab..87ddfd5e71 100644
--- a/libs/libcurl/src/conncache.c
+++ b/libs/libcurl/src/conncache.c
@@ -29,13 +29,17 @@
#include "urldata.h"
#include "url.h"
+#include "cfilters.h"
#include "progress.h"
#include "multiif.h"
#include "sendf.h"
#include "conncache.h"
+#include "http_negotiate.h"
+#include "http_ntlm.h"
#include "share.h"
#include "sigpipe.h"
#include "connect.h"
+#include "select.h"
#include "strcase.h"
/* The last 3 #include files should be in this order */
@@ -45,6 +49,24 @@
#define HASHKEY_SIZE 128
+static void connc_discard_conn(struct conncache *connc,
+ struct Curl_easy *last_data,
+ struct connectdata *conn,
+ bool aborted);
+static void connc_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct conncache *connc,
+ bool do_shutdown);
+static void connc_run_conn_shutdown(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done);
+static void connc_run_conn_shutdown_handler(struct Curl_easy *data,
+ struct connectdata *conn);
+static CURLMcode connc_update_shutdown_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct connectdata *conn);
+static void connc_shutdown_all(struct conncache *connc, int timeout_ms);
+
static CURLcode bundle_create(struct connectbundle **bundlep)
{
DEBUGASSERT(*bundlep == NULL);
@@ -100,25 +122,35 @@ static void free_bundle_hash_entry(void *freethis)
bundle_destroy(b);
}
-int Curl_conncache_init(struct conncache *connc, size_t size)
+int Curl_conncache_init(struct conncache *connc,
+ struct Curl_multi *multi, size_t size)
{
/* allocate a new easy handle to use when closing cached connections */
connc->closure_handle = curl_easy_init();
if(!connc->closure_handle)
return 1; /* bad */
connc->closure_handle->state.internal = true;
+ #ifdef DEBUGBUILD
+ if(getenv("CURL_DEBUG"))
+ connc->closure_handle->set.verbose = true;
+#endif
Curl_hash_init(&connc->hash, size, Curl_hash_str,
Curl_str_key_compare, free_bundle_hash_entry);
connc->closure_handle->state.conn_cache = connc;
+ connc->multi = multi;
+ Curl_llist_init(&connc->shutdowns.conn_list, NULL);
return 0; /* good */
}
void Curl_conncache_destroy(struct conncache *connc)
{
- if(connc)
+ if(connc) {
Curl_hash_destroy(&connc->hash);
+ connc->multi = NULL;
+ DEBUGASSERT(!Curl_llist_count(&connc->shutdowns.conn_list));
+ }
}
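The extra argument lets the cache reach its owning multi (and the multi's socket callback) when connections are later moved to the shutdown list. A rough sketch of the intended lifecycle, assuming internal headers; the hash size 97 is illustrative:

    /* Sketch: a multi-owned cache passes its multi handle, a standalone
     * cache passes NULL. Curl_conncache_init() returns 1 on failure. */
    if(Curl_conncache_init(&multi->conn_cache, multi, 97))
      return CURLM_OUT_OF_MEMORY;
    /* ... later, on cleanup ... */
    Curl_conncache_destroy(&multi->conn_cache);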
/* creates a key to find a bundle for this connection */
@@ -180,15 +212,14 @@ Curl_conncache_find_bundle(struct Curl_easy *data,
return bundle;
}
-static void *conncache_add_bundle(struct conncache *connc,
- char *key,
- struct connectbundle *bundle)
+static void *connc_add_bundle(struct conncache *connc,
+ char *key, struct connectbundle *bundle)
{
return Curl_hash_add(&connc->hash, key, strlen(key), bundle);
}
-static void conncache_remove_bundle(struct conncache *connc,
- struct connectbundle *bundle)
+static void connc_remove_bundle(struct conncache *connc,
+ struct connectbundle *bundle)
{
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
@@ -231,7 +262,7 @@ CURLcode Curl_conncache_add_conn(struct Curl_easy *data)
hashkey(conn, key, sizeof(key));
- if(!conncache_add_bundle(data->state.conn_cache, key, bundle)) {
+ if(!connc_add_bundle(data->state.conn_cache, key, bundle)) {
bundle_destroy(bundle);
result = CURLE_OUT_OF_MEMORY;
goto unlock;
@@ -252,6 +283,23 @@ unlock:
return result;
}
+static void connc_remove_conn(struct conncache *connc,
+ struct connectdata *conn)
+{
+ struct connectbundle *bundle = conn->bundle;
+
+ /* The bundle pointer can be NULL, since this function can be called
+ due to a failed connection attempt, before being added to a bundle */
+ if(bundle) {
+ bundle_remove_conn(bundle, conn);
+ if(connc && bundle->num_connections == 0)
+ connc_remove_bundle(connc, bundle);
+ conn->bundle = NULL; /* removed from it */
+ if(connc)
+ connc->num_conn--;
+ }
+}
+
/*
* Removes the connectdata object from the connection cache, but the transfer
* still owns this connection.
@@ -262,28 +310,16 @@ unlock:
void Curl_conncache_remove_conn(struct Curl_easy *data,
struct connectdata *conn, bool lock)
{
- struct connectbundle *bundle = conn->bundle;
struct conncache *connc = data->state.conn_cache;
- /* The bundle pointer can be NULL, since this function can be called
- due to a failed connection attempt, before being added to a bundle */
- if(bundle) {
- if(lock) {
- CONNCACHE_LOCK(data);
- }
- bundle_remove_conn(bundle, conn);
- if(bundle->num_connections == 0)
- conncache_remove_bundle(connc, bundle);
- conn->bundle = NULL; /* removed from it */
- if(connc) {
- connc->num_conn--;
- DEBUGF(infof(data, "The cache now contains %zu members",
- connc->num_conn));
- }
- if(lock) {
- CONNCACHE_UNLOCK(data);
- }
- }
+ if(lock)
+ CONNCACHE_LOCK(data);
+ connc_remove_conn(connc, conn);
+ if(lock)
+ CONNCACHE_UNLOCK(data);
+ if(connc)
+ DEBUGF(infof(data, "The cache now contains %zu members",
+ connc->num_conn));
}
/* This function iterates the entire connection cache and calls the function
@@ -345,7 +381,7 @@ bool Curl_conncache_foreach(struct Curl_easy *data,
up a cache!
*/
static struct connectdata *
-conncache_find_first_connection(struct conncache *connc)
+connc_find_first_connection(struct conncache *connc)
{
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
@@ -394,8 +430,7 @@ bool Curl_conncache_return_conn(struct Curl_easy *data,
important that details from this (unrelated) disconnect does not
taint meta-data in the data handle. */
struct conncache *connc = data->state.conn_cache;
- Curl_disconnect(connc->closure_handle, conn_candidate,
- /* dead_connection */ FALSE);
+ connc_disconnect(NULL, conn_candidate, connc, TRUE);
}
}
@@ -516,33 +551,589 @@ Curl_conncache_extract_oldest(struct Curl_easy *data)
return conn_candidate;
}
-void Curl_conncache_close_all_connections(struct conncache *connc)
+static void connc_shutdown_discard_all(struct conncache *connc)
+{
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct connectdata *conn;
+
+ if(!e)
+ return;
+
+ DEBUGF(infof(connc->closure_handle, "conncache_shutdown_discard_all"));
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ conn = e->ptr;
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ DEBUGF(infof(connc->closure_handle, "discard connection #%"
+ CURL_FORMAT_CURL_OFF_T, conn->connection_id));
+ connc_disconnect(NULL, conn, connc, FALSE);
+ e = connc->shutdowns.conn_list.head;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+static void connc_close_all(struct conncache *connc)
{
+ struct Curl_easy *data = connc->closure_handle;
struct connectdata *conn;
+ int timeout_ms = 0;
SIGPIPE_VARIABLE(pipe_st);
- if(!connc->closure_handle)
+
+ if(!data)
return;
- conn = conncache_find_first_connection(connc);
+ /* Move all connections to the shutdown list */
+ conn = connc_find_first_connection(connc);
while(conn) {
- sigpipe_ignore(connc->closure_handle, &pipe_st);
+ connc_remove_conn(connc, conn);
+ sigpipe_ignore(data, &pipe_st);
/* This will remove the connection from the cache */
connclose(conn, "kill all");
Curl_conncache_remove_conn(connc->closure_handle, conn, TRUE);
- Curl_disconnect(connc->closure_handle, conn, FALSE);
+ connc_discard_conn(connc, connc->closure_handle, conn, FALSE);
sigpipe_restore(&pipe_st);
- conn = conncache_find_first_connection(connc);
+ conn = connc_find_first_connection(connc);
}
- sigpipe_ignore(connc->closure_handle, &pipe_st);
+ /* Just for testing, run graceful shutdown */
+#ifdef DEBUGBUILD
+ {
+ char *p = getenv("CURL_GRACEFUL_SHUTDOWN");
+ if(p) {
+ long l = strtol(p, NULL, 10);
+ if(l > 0 && l < INT_MAX)
+ timeout_ms = (int)l;
+ }
+ }
+#endif
+ connc_shutdown_all(connc, timeout_ms);
+
+ /* discard all connections in the shutdown list */
+ connc_shutdown_discard_all(connc);
- Curl_hostcache_clean(connc->closure_handle,
- connc->closure_handle->dns.hostcache);
- Curl_close(&connc->closure_handle);
+ sigpipe_ignore(data, &pipe_st);
+ Curl_hostcache_clean(data, data->dns.hostcache);
+ Curl_close(&data);
sigpipe_restore(&pipe_st);
}
+void Curl_conncache_close_all_connections(struct conncache *connc)
+{
+ connc_close_all(connc);
+}
+
+static void connc_shutdown_discard_oldest(struct conncache *connc)
+{
+ struct Curl_llist_element *e;
+ struct connectdata *conn;
+ SIGPIPE_VARIABLE(pipe_st);
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ if(connc->shutdowns.iter_locked)
+ return;
+
+ e = connc->shutdowns.conn_list.head;
+ if(e) {
+ conn = e->ptr;
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ sigpipe_ignore(connc->closure_handle, &pipe_st);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ sigpipe_restore(&pipe_st);
+ }
+}
+
+static void connc_discard_conn(struct conncache *connc,
+ struct Curl_easy *last_data,
+ struct connectdata *conn,
+ bool aborted)
+{
+ /* `last_data`, if present, is the transfer that last worked with
+ * the connection. It is present when the connection is being shut down
+ * via `Curl_conncache_discard_conn()`, e.g. when the transfer failed
+ * or does not allow connection reuse.
+ * Using the original handle is necessary for shutting down the protocol
+ * handler belonging to the connection. Protocols like 'file:' rely on
+ * being invoked to clean up their allocations in the easy handle.
+ * When a connection comes from the cache, the transfer is no longer
+ * there and we use the cache's own closure handle.
+ */
+ struct Curl_easy *data = last_data? last_data : connc->closure_handle;
+ bool done = FALSE;
+
+ DEBUGASSERT(data);
+ DEBUGASSERT(connc);
+ DEBUGASSERT(!conn->bundle);
+
+ /*
+ * If this connection is not marked to force-close, leave it open if there
+ * are other users of it
+ */
+ if(CONN_INUSE(conn) && !aborted) {
+ DEBUGF(infof(data, "[CCACHE] not discarding #%" CURL_FORMAT_CURL_OFF_T
+ " still in use by %zu transfers", conn->connection_id,
+ CONN_INUSE(conn)));
+ return;
+ }
+
+ /* treat the connection as aborted in CONNECT_ONLY situations, we do
+ * not know what the APP did with it. */
+ if(conn->connect_only)
+ aborted = TRUE;
+ conn->bits.aborted = aborted;
+
+ /* We do not shutdown dead connections. The term 'dead' can be misleading
+ * here, as we also mark errored connections/transfers as 'dead'.
+ * If we do a shutdown for an aborted transfer, the server might think
+ * it was successful otherwise (for example an ftps: upload). This is
+ * not what we want. */
+ if(aborted)
+ done = TRUE;
+ if(!done) {
+ /* Attempt to shutdown the connection right away. */
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d",conn->connection_id, done));
+ Curl_detach_connection(data);
+ }
+
+ if(done) {
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ if(connc->shutdowns.iter_locked) {
+ DEBUGF(infof(data, "[CCACHE] discarding #%" CURL_FORMAT_CURL_OFF_T
+ ", list locked", conn->connection_id));
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+
+ /* Add the connection to our shutdown list for non-blocking shutdown
+ * during multi processing. */
+ if(data->multi && data->multi->max_shutdown_connections > 0 &&
+ (data->multi->max_shutdown_connections >=
+ (long)Curl_llist_count(&connc->shutdowns.conn_list))) {
+ DEBUGF(infof(data, "[CCACHE] discarding oldest shutdown connection "
+ "due to limit of %ld",
+ data->multi->max_shutdown_connections));
+ connc_shutdown_discard_oldest(connc);
+ }
+
+ if(data->multi && data->multi->socket_cb) {
+ DEBUGASSERT(connc == &data->multi->conn_cache);
+ /* Start with an empty shutdown pollset, so our internal closure handle
+ * is added to the sockets. */
+ memset(&conn->shutdown_poll, 0, sizeof(conn->shutdown_poll));
+ if(connc_update_shutdown_ev(data->multi, connc->closure_handle, conn)) {
+ DEBUGF(infof(data, "[CCACHE] update events for shutdown failed, "
+ "discarding #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ connc_disconnect(data, conn, connc, FALSE);
+ return;
+ }
+ }
+
+ Curl_llist_append(&connc->shutdowns.conn_list, conn, &conn->bundle_node);
+ DEBUGF(infof(data, "[CCACHE] added #%" CURL_FORMAT_CURL_OFF_T
+ " to shutdown list of length %zu", conn->connection_id,
+ Curl_llist_count(&connc->shutdowns.conn_list)));
+}
+
+void Curl_conncache_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool aborted)
+{
+ DEBUGASSERT(data);
+ /* Connection must no longer be in any connection cache */
+ DEBUGASSERT(!conn->bundle);
+
+ if(data->multi) {
+ /* Add it to the multi's conncache for shutdown handling */
+ infof(data, "%s connection #%" CURL_FORMAT_CURL_OFF_T,
+ aborted? "closing" : "shutting down", conn->connection_id);
+ connc_discard_conn(&data->multi->conn_cache, data, conn, aborted);
+ }
+ else {
+ /* No multi available. Make a best-effort shutdown + close */
+ infof(data, "closing connection #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id);
+ DEBUGASSERT(!conn->bundle);
+ connc_run_conn_shutdown_handler(data, conn);
+ connc_disconnect(data, conn, NULL, !aborted);
+ }
+}
+
+static void connc_run_conn_shutdown_handler(struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ if(!conn->bits.shutdown_handler) {
+ if(conn->dns_entry) {
+ Curl_resolv_unlock(data, conn->dns_entry);
+ conn->dns_entry = NULL;
+ }
+
+ /* Cleanup NTLM connection-related data */
+ Curl_http_auth_cleanup_ntlm(conn);
+
+ /* Cleanup NEGOTIATE connection-related data */
+ Curl_http_auth_cleanup_negotiate(conn);
+
+ if(conn->handler && conn->handler->disconnect) {
+ /* This is set if protocol-specific cleanups should be made */
+ DEBUGF(infof(data, "connection #%" CURL_FORMAT_CURL_OFF_T
+ ", shutdown protocol handler (aborted=%d)",
+ conn->connection_id, conn->bits.aborted));
+ conn->handler->disconnect(data, conn, conn->bits.aborted);
+ }
+
+ /* possible left-overs from the async name resolvers */
+ Curl_resolver_cancel(data);
+
+ conn->bits.shutdown_handler = TRUE;
+ }
+}
+
+static void connc_run_conn_shutdown(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done)
+{
+ CURLcode r1, r2;
+ bool done1, done2;
+
+ /* We expect to be attached when called */
+ DEBUGASSERT(data->conn == conn);
+
+ connc_run_conn_shutdown_handler(data, conn);
+
+ if(conn->bits.shutdown_filters) {
+ *done = TRUE;
+ return;
+ }
+
+ if(!conn->connect_only && Curl_conn_is_connected(conn, FIRSTSOCKET))
+ r1 = Curl_conn_shutdown(data, FIRSTSOCKET, &done1);
+ else {
+ r1 = CURLE_OK;
+ done1 = TRUE;
+ }
+
+ if(!conn->connect_only && Curl_conn_is_connected(conn, SECONDARYSOCKET))
+ r2 = Curl_conn_shutdown(data, SECONDARYSOCKET, &done2);
+ else {
+ r2 = CURLE_OK;
+ done2 = TRUE;
+ }
+
+ /* we are done when any failed or both report success */
+ *done = (r1 || r2 || (done1 && done2));
+ if(*done)
+ conn->bits.shutdown_filters = TRUE;
+}
+
+CURLcode Curl_conncache_add_pollfds(struct conncache *connc,
+ struct curl_pollfds *cpfds)
+{
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ if(connc->shutdowns.conn_list.head) {
+ struct Curl_llist_element *e;
+ struct easy_pollset ps;
+ struct connectdata *conn;
+
+ for(e = connc->shutdowns.conn_list.head; e; e = e->next) {
+ conn = e->ptr;
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(connc->closure_handle, conn);
+ Curl_conn_adjust_pollset(connc->closure_handle, &ps);
+ Curl_detach_connection(connc->closure_handle);
+
+ result = Curl_pollfds_add_ps(cpfds, &ps);
+ if(result) {
+ Curl_pollfds_cleanup(cpfds);
+ goto out;
+ }
+ }
+ }
+out:
+ connc->shutdowns.iter_locked = FALSE;
+ return result;
+}
+
+CURLcode Curl_conncache_add_waitfds(struct conncache *connc,
+ struct curl_waitfds *cwfds)
+{
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ connc->shutdowns.iter_locked = TRUE;
+ if(connc->shutdowns.conn_list.head) {
+ struct Curl_llist_element *e;
+ struct easy_pollset ps;
+ struct connectdata *conn;
+
+ for(e = connc->shutdowns.conn_list.head; e; e = e->next) {
+ conn = e->ptr;
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(connc->closure_handle, conn);
+ Curl_conn_adjust_pollset(connc->closure_handle, &ps);
+ Curl_detach_connection(connc->closure_handle);
+
+ result = Curl_waitfds_add_ps(cwfds, &ps);
+ if(result)
+ goto out;
+ }
+ }
+out:
+ connc->shutdowns.iter_locked = FALSE;
+ return result;
+}
+
+static void connc_perform(struct conncache *connc)
+{
+ struct Curl_easy *data = connc->closure_handle;
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct Curl_llist_element *enext;
+ struct connectdata *conn;
+ bool done;
+
+ if(!e)
+ return;
+
+ DEBUGASSERT(data);
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ DEBUGF(infof(data, "[CCACHE] perform, %zu connections being shutdown",
+ Curl_llist_count(&connc->shutdowns.conn_list)));
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ enext = e->next;
+ conn = e->ptr;
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d", conn->connection_id, done));
+ Curl_detach_connection(data);
+ if(done) {
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ }
+ e = enext;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+void Curl_conncache_multi_perform(struct Curl_multi *multi)
+{
+ connc_perform(&multi->conn_cache);
+}
+
+
+/*
+ * Disconnects the given connection. Note the connection may not be the
+ * primary connection, like when freeing room in the connection cache or
+ * killing of a dead old connection.
+ *
+ * A connection needs an easy handle when closing down. We support this passed
+ * in separately since the connection to get closed here is often already
+ * disassociated from an easy handle.
+ *
+ * This function MUST NOT reset state in the Curl_easy struct if that
+ * is not strictly bound to the life-time of *this* particular connection.
+ *
+ */
+static void connc_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct conncache *connc,
+ bool do_shutdown)
+{
+ bool done;
+
+ /* there must be a connection to close */
+ DEBUGASSERT(conn);
+ /* it must be removed from the connection cache */
+ DEBUGASSERT(!conn->bundle);
+ /* there must be an associated transfer */
+ DEBUGASSERT(data || connc);
+ if(!data)
+ data = connc->closure_handle;
+
+ /* the transfer must be detached from the connection */
+ DEBUGASSERT(data && !data->conn);
+
+ Curl_attach_connection(data, conn);
+
+ if(connc && connc->multi && connc->multi->socket_cb) {
+ struct easy_pollset ps;
+ /* With an empty pollset, all previously polled sockets will be removed
+ * via the multi_socket API callback. */
+ memset(&ps, 0, sizeof(ps));
+ (void)Curl_multi_pollset_ev(connc->multi, data, &ps, &conn->shutdown_poll);
+ }
+
+ connc_run_conn_shutdown_handler(data, conn);
+ if(do_shutdown) {
+ /* Make a last attempt to shutdown handlers and filters, if
+ * not done so already. */
+ connc_run_conn_shutdown(data, conn, &done);
+ }
+
+ if(connc)
+ DEBUGF(infof(data, "[CCACHE] closing #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ else
+ DEBUGF(infof(data, "closing connection #%" CURL_FORMAT_CURL_OFF_T,
+ conn->connection_id));
+ Curl_conn_close(data, SECONDARYSOCKET);
+ Curl_conn_close(data, FIRSTSOCKET);
+ Curl_detach_connection(data);
+
+ Curl_conn_free(data, conn);
+}
+
+
+static CURLMcode connc_update_shutdown_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ struct easy_pollset ps;
+ CURLMcode mresult;
+
+ DEBUGASSERT(data);
+ DEBUGASSERT(multi);
+ DEBUGASSERT(multi->socket_cb);
+
+ memset(&ps, 0, sizeof(ps));
+ Curl_attach_connection(data, conn);
+ Curl_conn_adjust_pollset(data, &ps);
+ Curl_detach_connection(data);
+
+ mresult = Curl_multi_pollset_ev(multi, data, &ps, &conn->shutdown_poll);
+
+ if(!mresult) /* Remember for next time */
+ memcpy(&conn->shutdown_poll, &ps, sizeof(ps));
+ return mresult;
+}
+
+void Curl_conncache_multi_socket(struct Curl_multi *multi,
+ curl_socket_t s, int ev_bitmask)
+{
+ struct conncache *connc = &multi->conn_cache;
+ struct Curl_easy *data = connc->closure_handle;
+ struct Curl_llist_element *e = connc->shutdowns.conn_list.head;
+ struct connectdata *conn;
+ bool done;
+
+ (void)ev_bitmask;
+ DEBUGASSERT(multi->socket_cb);
+ if(!e)
+ return;
+
+ connc->shutdowns.iter_locked = TRUE;
+ while(e) {
+ conn = e->ptr;
+ if(s == conn->sock[FIRSTSOCKET] || s == conn->sock[SECONDARYSOCKET]) {
+ Curl_attach_connection(data, conn);
+ connc_run_conn_shutdown(data, conn, &done);
+ DEBUGF(infof(data, "[CCACHE] shutdown #%" CURL_FORMAT_CURL_OFF_T
+ ", done=%d", conn->connection_id, done));
+ Curl_detach_connection(data);
+ if(done || connc_update_shutdown_ev(multi, data, conn)) {
+ Curl_llist_remove(&connc->shutdowns.conn_list, e, NULL);
+ connc_disconnect(NULL, conn, connc, FALSE);
+ }
+ break;
+ }
+ e = e->next;
+ }
+ connc->shutdowns.iter_locked = FALSE;
+}
+
+void Curl_conncache_multi_close_all(struct Curl_multi *multi)
+{
+ connc_close_all(&multi->conn_cache);
+}
+
+
+#define NUM_POLLS_ON_STACK 10
+
+static CURLcode connc_shutdown_wait(struct conncache *connc, int timeout_ms)
+{
+ struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK];
+ struct curl_pollfds cpfds;
+ CURLcode result;
+
+ Curl_pollfds_init(&cpfds, a_few_on_stack, NUM_POLLS_ON_STACK);
+
+ result = Curl_conncache_add_pollfds(connc, &cpfds);
+ if(result)
+ goto out;
+
+ Curl_poll(cpfds.pfds, cpfds.n, CURLMIN(timeout_ms, 1000));
+
+out:
+ Curl_pollfds_cleanup(&cpfds);
+ return result;
+}
+
+static void connc_shutdown_all(struct conncache *connc, int timeout_ms)
+{
+ struct Curl_easy *data = connc->closure_handle;
+ struct connectdata *conn;
+ struct curltime started = Curl_now();
+
+ if(!data)
+ return;
+ (void)data;
+
+ DEBUGF(infof(data, "conncache shutdown all"));
+
+ /* Move all connections into the shutdown queue */
+ conn = connc_find_first_connection(connc);
+ while(conn) {
+ /* This will remove the connection from the cache */
+ DEBUGF(infof(data, "moving connection %" CURL_FORMAT_CURL_OFF_T
+ " to shutdown queue", conn->connection_id));
+ connc_remove_conn(connc, conn);
+ connc_discard_conn(connc, NULL, conn, FALSE);
+ conn = connc_find_first_connection(connc);
+ }
+
+ DEBUGASSERT(!connc->shutdowns.iter_locked);
+ while(connc->shutdowns.conn_list.head) {
+ timediff_t timespent;
+ int remain_ms;
+
+ connc_perform(connc);
+
+ if(!connc->shutdowns.conn_list.head) {
+ DEBUGF(infof(data, "conncache shutdown ok"));
+ break;
+ }
+
+ /* wait for activity, timeout or "nothing" */
+ timespent = Curl_timediff(Curl_now(), started);
+ if(timespent >= (timediff_t)timeout_ms) {
+ DEBUGF(infof(data, "conncache shutdown %s",
+ (timeout_ms > 0)? "timeout" : "best effort done"));
+ break;
+ }
+
+ remain_ms = timeout_ms - (int)timespent;
+ if(connc_shutdown_wait(connc, remain_ms)) {
+ DEBUGF(infof(data, "conncache shutdown all, abort"));
+ break;
+ }
+ }
+
+ /* Due to errors/timeout, we might come here without being fully done. */
+ connc_shutdown_discard_all(connc);
+}
+
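connc_shutdown_all() above boils down to a deadline loop: make progress, and if work remains, wait a bounded slice until the list drains or the budget is spent. A self-contained illustration of just that control flow (plain C, not curl code); do_work() and wait_slice() stand in for connc_perform() and connc_shutdown_wait():

    #include <stdio.h>

    static int pending = 3;                 /* connections still shutting down */
    static void do_work(void) { pending--; }
    static int wait_slice(int max_ms) { return max_ms < 100 ? max_ms : 100; }

    int main(void)
    {
      const int timeout_ms = 2000;          /* overall shutdown budget */
      int spent_ms = 0;
      while(pending > 0) {
        do_work();                          /* connc_perform() equivalent */
        if(!pending)
          break;                            /* all shut down cleanly */
        if(spent_ms >= timeout_ms) {
          printf("timeout, %d left for hard close\n", pending);
          break;                            /* discard the rest */
        }
        spent_ms += wait_slice(timeout_ms - spent_ms); /* poll() would go here */
      }
      return 0;
    }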
#if 0
/* Useful for debugging the connection cache */
void Curl_conncache_print(struct conncache *connc)
diff --git a/libs/libcurl/src/conncache.h b/libs/libcurl/src/conncache.h
index fe51706b56..7baae615a0 100644
--- a/libs/libcurl/src/conncache.h
+++ b/libs/libcurl/src/conncache.h
@@ -35,6 +35,14 @@
#include "timeval.h"
struct connectdata;
+struct curl_pollfds;
+struct curl_waitfds;
+struct Curl_multi;
+
+struct connshutdowns {
+ struct Curl_llist conn_list; /* The connectdata to shut down */
+ BIT(iter_locked); /* TRUE while iterating the list */
+};
struct conncache {
struct Curl_hash hash;
@@ -42,15 +50,17 @@ struct conncache {
curl_off_t next_connection_id;
curl_off_t next_easy_id;
struct curltime last_cleanup;
+ struct connshutdowns shutdowns;
/* handle used for closing cached connections */
struct Curl_easy *closure_handle;
+ struct Curl_multi *multi; /* Optional, set if cache belongs to multi */
};
#define BUNDLE_NO_MULTIUSE -1
#define BUNDLE_UNKNOWN 0 /* initial value */
#define BUNDLE_MULTIPLEX 2
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
/* the debug versions of these macros make extra certain that the lock is
never doubly locked or unlocked */
#define CONNCACHE_LOCK(x) \
@@ -84,8 +94,12 @@ struct connectbundle {
struct Curl_llist conn_list; /* The connectdata members of the bundle */
};
-/* returns 1 on error, 0 is fine */
-int Curl_conncache_init(struct conncache *, size_t size);
+/* Init the cache, pass multi only if cache is owned by it.
+ * returns 1 on error, 0 is fine.
+ */
+int Curl_conncache_init(struct conncache *,
+ struct Curl_multi *multi,
+ size_t size);
void Curl_conncache_destroy(struct conncache *connc);
/* return the correct bundle, to a host or a proxy */
@@ -119,4 +133,32 @@ Curl_conncache_extract_oldest(struct Curl_easy *data);
void Curl_conncache_close_all_connections(struct conncache *connc);
void Curl_conncache_print(struct conncache *connc);
+/**
+ * Tear down the connection. If `aborted` is FALSE, the connection is
+ * shut down first before being discarded. If the shutdown does not
+ * complete immediately, the connection is placed into the cache's
+ * shutdown queue.
+ */
+void Curl_conncache_disconnect(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool aborted);
+
+/**
+ * Add sockets and POLLIN/OUT flags for connections handled by the cache.
+ */
+CURLcode Curl_conncache_add_pollfds(struct conncache *connc,
+ struct curl_pollfds *cpfds);
+CURLcode Curl_conncache_add_waitfds(struct conncache *connc,
+ struct curl_waitfds *cwfds);
+
+/**
+ * Perform maintenance on connections in the cache. Specifically,
+ * progress the shutdown of connections in the queue.
+ */
+void Curl_conncache_multi_perform(struct Curl_multi *multi);
+
+void Curl_conncache_multi_socket(struct Curl_multi *multi,
+ curl_socket_t s, int ev_bitmask);
+void Curl_conncache_multi_close_all(struct Curl_multi *multi);
+
#endif /* HEADER_CURL_CONNCACHE_H */
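Putting the new declarations together, a rough sketch of how a multi-style event loop is expected to drive them (internal API as declared above; error handling trimmed, buffer size illustrative):

    /* Sketch: gather the shutdown sockets, wait briefly, then let the
     * cache progress its shutdown queue. */
    struct pollfd a_few[10];
    struct curl_pollfds cpfds;

    Curl_pollfds_init(&cpfds, a_few, 10);
    if(!Curl_conncache_add_pollfds(&multi->conn_cache, &cpfds))
      Curl_poll(cpfds.pfds, cpfds.n, 100);   /* wait up to 100 ms */
    Curl_pollfds_cleanup(&cpfds);
    Curl_conncache_multi_perform(multi);     /* progress the shutdowns */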
diff --git a/libs/libcurl/src/connect.c b/libs/libcurl/src/connect.c
index 7ca66eaea1..f31a4d7c88 100644
--- a/libs/libcurl/src/connect.c
+++ b/libs/libcurl/src/connect.c
@@ -90,7 +90,7 @@
/*
* Curl_timeleft() returns the amount of milliseconds left allowed for the
- * transfer/connection. If the value is 0, there's no timeout (ie there's
+ * transfer/connection. If the value is 0, there is no timeout (ie there is
* infinite time left). If the value is negative, the timeout time has already
* elapsed.
* @param data the transfer to check on
@@ -142,6 +142,49 @@ timediff_t Curl_timeleft(struct Curl_easy *data,
return (ctimeleft_ms < timeleft_ms)? ctimeleft_ms : timeleft_ms;
}
+void Curl_shutdown_start(struct Curl_easy *data, int sockindex,
+ struct curltime *nowp)
+{
+ struct curltime now;
+
+ DEBUGASSERT(data->conn);
+ if(!nowp) {
+ now = Curl_now();
+ nowp = &now;
+ }
+ data->conn->shutdown.start[sockindex] = *nowp;
+ data->conn->shutdown.timeout_ms = (data->set.shutdowntimeout > 0) ?
+ data->set.shutdowntimeout : DEFAULT_SHUTDOWN_TIMEOUT_MS;
+}
+
+timediff_t Curl_shutdown_timeleft(struct connectdata *conn, int sockindex,
+ struct curltime *nowp)
+{
+ struct curltime now;
+
+ if(!conn->shutdown.start[sockindex].tv_sec || !conn->shutdown.timeout_ms)
+ return 0; /* not started or no limits */
+
+ if(!nowp) {
+ now = Curl_now();
+ nowp = &now;
+ }
+ return conn->shutdown.timeout_ms -
+ Curl_timediff(*nowp, conn->shutdown.start[sockindex]);
+}
+
+void Curl_shutdown_clear(struct Curl_easy *data, int sockindex)
+{
+ struct curltime *pt = &data->conn->shutdown.start[sockindex];
+ memset(pt, 0, sizeof(*pt));
+}
+
+bool Curl_shutdown_started(struct Curl_easy *data, int sockindex)
+{
+ struct curltime *pt = &data->conn->shutdown.start[sockindex];
+ return (pt->tv_sec > 0) || (pt->tv_usec > 0);
+}
+
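The helpers above just record a start timestamp plus a budget and later report budget minus elapsed time. The same arithmetic in standalone form (plain C with millisecond counters instead of struct curltime):

    #include <stdio.h>

    /* remaining = budget - elapsed; a value <= 0 means the shutdown
     * timer has expired, mirroring Curl_shutdown_timeleft(). */
    static long shutdown_timeleft(long started_ms, long now_ms, long budget_ms)
    {
      return budget_ms - (now_ms - started_ms);
    }

    int main(void)
    {
      long left = shutdown_timeleft(1000, 2500, 2000); /* 2000 - 1500 */
      printf("%ld ms left\n", left);                   /* prints 500 */
      return 0;
    }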
/* Copies connection info into the transfer handle to make it available when
the transfer handle is no longer associated with the connection. */
void Curl_persistconninfo(struct Curl_easy *data, struct connectdata *conn,
@@ -317,7 +360,7 @@ void Curl_conncontrol(struct connectdata *conn,
#endif
)
{
- /* close if a connection, or a stream that isn't multiplexed. */
+ /* close if a connection, or a stream that is not multiplexed. */
/* This function will be called both before and after this connection is
associated with a transfer. */
bool closeit, is_multiplex;
@@ -358,6 +401,7 @@ struct eyeballer {
BIT(has_started); /* attempts have started */
BIT(is_done); /* out of addresses/time */
BIT(connected); /* cf has connected */
+ BIT(shutdown); /* cf has shutdown */
BIT(inconclusive); /* connect was not a hard failure, we
* might talk to a restarting server */
};
@@ -464,7 +508,7 @@ static void baller_initiate(struct Curl_cfilter *cf,
CURLcode result;
- /* Don't close a previous cfilter yet to ensure that the next IP's
+ /* Do not close a previous cfilter yet to ensure that the next IP's
socket gets a different file descriptor, which can prevent bugs when
the curl_multi_socket_action interface is used with certain select()
replacements such as kqueue. */
@@ -744,7 +788,7 @@ evaluate:
}
/*
- * Connect to the given host with timeout, proxy or remote doesn't matter.
+ * Connect to the given host with timeout, proxy or remote does not matter.
* There might be more than one IP address to try out.
*/
static CURLcode start_connect(struct Curl_cfilter *cf,
@@ -857,6 +901,46 @@ static void cf_he_ctx_clear(struct Curl_cfilter *cf, struct Curl_easy *data)
ctx->winner = NULL;
}
+static CURLcode cf_he_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_he_ctx *ctx = cf->ctx;
+ size_t i;
+ CURLcode result = CURLE_OK;
+
+ DEBUGASSERT(data);
+ if(cf->connected) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ /* shut down all ballers that have not done so already. If one fails,
+ * continue shutting down the others until all are shut down. */
+ for(i = 0; i < ARRAYSIZE(ctx->baller); i++) {
+ struct eyeballer *baller = ctx->baller[i];
+ bool bdone = FALSE;
+ if(!baller || !baller->cf || baller->shutdown)
+ continue;
+ baller->result = baller->cf->cft->do_shutdown(baller->cf, data, &bdone);
+ if(baller->result || bdone)
+ baller->shutdown = TRUE; /* treat a failed shutdown as done */
+ }
+
+ *done = TRUE;
+ for(i = 0; i < ARRAYSIZE(ctx->baller); i++) {
+ if(ctx->baller[i] && !ctx->baller[i]->shutdown)
+ *done = FALSE;
+ }
+ if(*done) {
+ for(i = 0; i < ARRAYSIZE(ctx->baller); i++) {
+ if(ctx->baller[i] && ctx->baller[i]->result)
+ result = ctx->baller[i]->result;
+ }
+ }
+ CURL_TRC_CF(data, cf, "shutdown -> %d, done=%d", result, *done);
+ return result;
+}
+
static void cf_he_adjust_pollset(struct Curl_cfilter *cf,
struct Curl_easy *data,
struct easy_pollset *ps)
@@ -917,7 +1001,7 @@ static CURLcode cf_he_connect(struct Curl_cfilter *cf,
CF_CTRL_CONN_INFO_UPDATE, 0, NULL);
if(cf->conn->handler->protocol & PROTO_FAMILY_SSH)
- Curl_pgrsTime(data, TIMER_APPCONNECT); /* we're connected already */
+ Curl_pgrsTime(data, TIMER_APPCONNECT); /* we are connected already */
Curl_verboseconnect(data, cf->conn, cf->sockindex);
data->info.numconnects++; /* to track the # of connections made */
}
@@ -1052,6 +1136,7 @@ struct Curl_cftype Curl_cft_happy_eyeballs = {
cf_he_destroy,
cf_he_connect,
cf_he_close,
+ cf_he_shutdown,
Curl_cf_def_get_host,
cf_he_adjust_pollset,
cf_he_data_pending,
@@ -1112,7 +1197,7 @@ struct transport_provider {
};
static
-#ifndef DEBUGBUILD
+#ifndef UNITTESTS
const
#endif
struct transport_provider transport_providers[] = {
@@ -1316,6 +1401,7 @@ struct Curl_cftype Curl_cft_setup = {
cf_setup_destroy,
cf_setup_connect,
cf_setup_close,
+ Curl_cf_def_shutdown,
Curl_cf_def_get_host,
Curl_cf_def_adjust_pollset,
Curl_cf_def_data_pending,
@@ -1378,7 +1464,7 @@ out:
return result;
}
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
/* used by unit2600.c */
void Curl_debug_set_transport_provider(int transport,
cf_ip_connect_create *cf_create)
@@ -1391,7 +1477,7 @@ void Curl_debug_set_transport_provider(int transport,
}
}
}
-#endif /* DEBUGBUILD */
+#endif /* UNITTESTS */
CURLcode Curl_cf_setup_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data,
diff --git a/libs/libcurl/src/connect.h b/libs/libcurl/src/connect.h
index ce14ea845a..d590f7fd19 100644
--- a/libs/libcurl/src/connect.h
+++ b/libs/libcurl/src/connect.h
@@ -32,7 +32,7 @@
struct Curl_dns_entry;
struct ip_quadruple;
-/* generic function that returns how much time there's left to run, according
+/* generic function that returns how much time there is left to run, according
to the timeouts set */
timediff_t Curl_timeleft(struct Curl_easy *data,
struct curltime *nowp,
@@ -40,6 +40,21 @@ timediff_t Curl_timeleft(struct Curl_easy *data,
#define DEFAULT_CONNECT_TIMEOUT 300000 /* milliseconds == five minutes */
+#define DEFAULT_SHUTDOWN_TIMEOUT_MS (2 * 1000)
+
+void Curl_shutdown_start(struct Curl_easy *data, int sockindex,
+ struct curltime *nowp);
+
+/* return how much time there is left to shut down the connection at
+ * sockindex. */
+timediff_t Curl_shutdown_timeleft(struct connectdata *conn, int sockindex,
+ struct curltime *nowp);
+
+void Curl_shutdown_clear(struct Curl_easy *data, int sockindex);
+
+/* TRUE iff shutdown has been started */
+bool Curl_shutdown_started(struct Curl_easy *data, int sockindex);
+
/*
* Used to extract socket and connectdata struct for the most recent
* transfer on the given Curl_easy.
@@ -125,7 +140,7 @@ CURLcode Curl_conn_setup(struct Curl_easy *data,
extern struct Curl_cftype Curl_cft_happy_eyeballs;
extern struct Curl_cftype Curl_cft_setup;
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
void Curl_debug_set_transport_provider(int transport,
cf_ip_connect_create *cf_create);
#endif
diff --git a/libs/libcurl/src/content_encoding.c b/libs/libcurl/src/content_encoding.c
index 2e8ec147e6..80c71afa72 100644
--- a/libs/libcurl/src/content_encoding.c
+++ b/libs/libcurl/src/content_encoding.c
@@ -82,7 +82,7 @@
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define ORIG_NAME 0x08 /* bit 3 set: original filename present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define RESERVED 0xE0 /* bits 5..7: reserved */
@@ -192,7 +192,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
zp->zlib_init != ZLIB_GZIP_INFLATING)
return exit_zlib(data, z, &zp->zlib_init, CURLE_WRITE_ERROR);
- /* Dynamically allocate a buffer for decompression because it's uncommonly
+ /* Dynamically allocate a buffer for decompression because it is uncommonly
large to hold on the stack */
decomp = malloc(DSIZ);
if(!decomp)
@@ -246,7 +246,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
to fix and continue anyway */
if(zp->zlib_init == ZLIB_INIT) {
/* Do not use inflateReset2(): only available since zlib 1.2.3.4. */
- (void) inflateEnd(z); /* don't care about the return code */
+ (void) inflateEnd(z); /* do not care about the return code */
if(inflateInit2(z, -MAX_WBITS) == Z_OK) {
z->next_in = orig_in;
z->avail_in = nread;
@@ -266,7 +266,7 @@ static CURLcode inflate_stream(struct Curl_easy *data,
}
free(decomp);
- /* We're about to leave this call so the `nread' data bytes won't be seen
+ /* We are about to leave this call so the `nread' data bytes will not be seen
again. If we are in a state that would wrongly allow restart in raw mode
at the next call, assume output has already started. */
if(nread && zp->zlib_init == ZLIB_INIT)
@@ -388,7 +388,7 @@ static gzip_status check_gzip_header(unsigned char const *data, ssize_t len,
flags = data[3];
if(method != Z_DEFLATED || (flags & RESERVED) != 0) {
- /* Can't handle this compression method or unknown flag */
+ /* cannot handle this compression method or unknown flag */
return GZIP_BAD;
}
@@ -412,7 +412,7 @@ static gzip_status check_gzip_header(unsigned char const *data, ssize_t len,
}
if(flags & ORIG_NAME) {
- /* Skip over NUL-terminated file name */
+ /* Skip over NUL-terminated filename */
while(len && *data) {
--len;
++data;
@@ -474,10 +474,10 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
return exit_zlib(data, z, &zp->zlib_init, CURLE_WRITE_ERROR);
#else
- /* This next mess is to get around the potential case where there isn't
- * enough data passed in to skip over the gzip header. If that happens, we
- * malloc a block and copy what we have then wait for the next call. If
- * there still isn't enough (this is definitely a worst-case scenario), we
+ /* This next mess is to get around the potential case where there is not
+ * enough data passed in to skip over the gzip header. If that happens, we
+ * malloc a block and copy what we have then wait for the next call. If
+ * there still is not enough (this is definitely a worst-case scenario), we
* make the block bigger, copy the next part in and keep waiting.
*
* This is only required with zlib versions < 1.2.0.4 as newer versions
@@ -499,11 +499,11 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
break;
case GZIP_UNDERFLOW:
- /* We need more data so we can find the end of the gzip header. It's
+ /* We need more data so we can find the end of the gzip header. It is
* possible that the memory block we malloc here will never be freed if
- * the transfer abruptly aborts after this point. Since it's unlikely
+ * the transfer abruptly aborts after this point. Since it is unlikely
* that circumstances will be right for this code path to be followed in
- * the first place, and it's even more unlikely for a transfer to fail
+ * the first place, and it is even more unlikely for a transfer to fail
* immediately afterwards, it should seldom be a problem.
*/
z->avail_in = (uInt) nbytes;
@@ -513,7 +513,7 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
}
memcpy(z->next_in, buf, z->avail_in);
zp->zlib_init = ZLIB_GZIP_HEADER; /* Need more gzip header data state */
- /* We don't have any data to inflate yet */
+ /* We do not have any data to inflate yet */
return CURLE_OK;
case GZIP_BAD:
@@ -536,18 +536,18 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
/* Append the new block of data to the previous one */
memcpy(z->next_in + z->avail_in - nbytes, buf, nbytes);
- switch(check_gzip_header(z->next_in, z->avail_in, &hlen)) {
+ switch(check_gzip_header(z->next_in, (ssize_t)z->avail_in, &hlen)) {
case GZIP_OK:
/* This is the zlib stream data */
free(z->next_in);
- /* Don't point into the malloced block since we just freed it */
+ /* Do not point into the malloced block since we just freed it */
z->next_in = (Bytef *) buf + hlen + nbytes - z->avail_in;
- z->avail_in = (uInt) (z->avail_in - hlen);
+ z->avail_in = z->avail_in - (uInt)hlen;
zp->zlib_init = ZLIB_GZIP_INFLATING; /* Inflating stream state */
break;
case GZIP_UNDERFLOW:
- /* We still don't have any data to inflate! */
+ /* We still do not have any data to inflate! */
return CURLE_OK;
case GZIP_BAD:
@@ -572,11 +572,11 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
}
if(z->avail_in == 0) {
- /* We don't have any data to inflate; wait until next time */
+ /* We do not have any data to inflate; wait until next time */
return CURLE_OK;
}
- /* We've parsed the header, now uncompress the data */
+ /* We have parsed the header, now uncompress the data */
return inflate_stream(data, writer, type, ZLIB_GZIP_INFLATING);
#endif
}
@@ -966,7 +966,7 @@ static const struct Curl_cwtype *find_unencode_writer(const char *name,
return NULL;
}
-/* Set-up the unencoding stack from the Content-Encoding header value.
+/* Set up the unencoding stack from the Content-Encoding header value.
* See RFC 7231 section 3.1.2.2. */
CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
const char *enclist, int is_transfer)
@@ -994,6 +994,8 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
const struct Curl_cwtype *cwt;
struct Curl_cwriter *writer;
+ CURL_TRC_WRITE(data, "looking for %s decoder: %.*s",
+ is_transfer? "transfer" : "content", (int)namelen, name);
is_chunked = (is_transfer && (namelen == 7) &&
strncasecompare(name, "chunked", 7));
/* if we skip the decoding in this phase, do not look further.
@@ -1001,6 +1003,8 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
if((is_transfer && !data->set.http_transfer_encoding && !is_chunked) ||
(!is_transfer && data->set.http_ce_skip)) {
/* not requested, ignore */
+ CURL_TRC_WRITE(data, "decoder not requested, ignored: %.*s",
+ (int)namelen, name);
return CURLE_OK;
}
@@ -1018,6 +1022,7 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
* "A sender MUST NOT apply the chunked transfer coding more than
* once to a message body."
*/
+ CURL_TRC_WRITE(data, "ignoring duplicate 'chunked' decoder");
return CURLE_OK;
}
@@ -1040,6 +1045,8 @@ CURLcode Curl_build_unencoding_stack(struct Curl_easy *data,
cwt = &error_writer; /* Defer error at use. */
result = Curl_cwriter_create(&writer, data, cwt, phase);
+ CURL_TRC_WRITE(data, "added %s decoder %s -> %d",
+ is_transfer? "transfer" : "content", cwt->name, result);
if(result)
return result;
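Curl_build_unencoding_stack() walks the comma-separated coding names in the header value and pushes one writer per recognized name. A standalone sketch of just the tokenizing step (illustration only; the real parser does not modify its input and also skips leading spaces):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
      char value[] = "gzip, br";              /* e.g. Content-Encoding: gzip, br */
      char *name = strtok(value, ", ");
      while(name) {
        printf("decoder needed: %s\n", name); /* gzip, then br */
        name = strtok(NULL, ", ");
      }
      return 0;
    }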
diff --git a/libs/libcurl/src/cookie.c b/libs/libcurl/src/cookie.c
index 020fa529ab..a098c17e82 100644
--- a/libs/libcurl/src/cookie.c
+++ b/libs/libcurl/src/cookie.c
@@ -61,7 +61,7 @@ struct Cookies *Curl_cookie_getlist(struct CookieInfo *cookie,
boolean informs the cookie if a secure connection is achieved or
not.
- It shall only return cookies that haven't expired.
+ It shall only return cookies that have not expired.
Example set of cookies:
@@ -150,7 +150,7 @@ static bool cookie_tailmatch(const char *cookie_domain,
}
/*
- * matching cookie path and url path
+ * matching cookie path and URL path
* RFC6265 5.1.4 Paths and Path-Match
*/
static bool pathmatch(const char *cookie_path, const char *request_uri)
@@ -262,8 +262,9 @@ static size_t cookie_hash_domain(const char *domain, const size_t len)
size_t h = 5381;
while(domain < end) {
+ size_t j = (size_t)Curl_raw_toupper(*domain++);
h += h << 5;
- h ^= Curl_raw_toupper(*domain++);
+ h ^= j;
}
return (h % COOKIE_HASH_SIZE);
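The change above only hoists the case-folded byte into a local before mixing it in; the hash itself stays a djb2-style multiply-and-xor over the uppercased domain. A standalone version for reference; the bucket count and the use of toupper() in place of curl's ASCII-only Curl_raw_toupper() are assumptions of the sketch:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    #define BUCKETS 63   /* assumed bucket count for the sketch */

    static size_t hash_domain(const char *domain, size_t len)
    {
      size_t h = 5381, i;
      for(i = 0; i < len; i++) {
        size_t j = (size_t)toupper((unsigned char)domain[i]);
        h += h << 5;     /* h = h * 33 ... */
        h ^= j;          /* ... xor-mixed with the next (case-folded) byte */
      }
      return h % BUCKETS;
    }

    int main(void)
    {
      const char *d = "Example.COM";
      printf("bucket: %zu\n", hash_domain(d, strlen(d)));
      return 0;
    }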
@@ -373,7 +374,7 @@ static void strstore(char **str, const char *newstr, size_t len)
*
* Remove expired cookies from the hash by inspecting the expires timestamp on
* each cookie in the hash, freeing and deleting any where the timestamp is in
- * the past. If the cookiejar has recorded the next timestamp at which one or
+ * the past. If the cookiejar has recorded the next timestamp at which one or
* more cookies expire, then processing will exit early in case this timestamp
* is in the future.
*/
@@ -385,11 +386,11 @@ static void remove_expired(struct CookieInfo *cookies)
/*
* If the earliest expiration timestamp in the jar is in the future we can
- * skip scanning the whole jar and instead exit early as there won't be any
- * cookies to evict. If we need to evict however, reset the next_expiration
- * counter in order to track the next one. In case the recorded first
- * expiration is the max offset, then perform the safe fallback of checking
- * all cookies.
+ * skip scanning the whole jar and instead exit early as there will not be
+ * any cookies to evict. If we need to evict however, reset the
+ * next_expiration counter in order to track the next one. In case the
+ * recorded first expiration is the max offset, then perform the safe
+ * fallback of checking all cookies.
*/
if(now < cookies->next_expiration &&
cookies->next_expiration != CURL_OFF_T_MAX)
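The comment describes a watermark: remember the earliest expiry seen, skip the sweep while `now` has not reached it, and re-learn the watermark whenever a sweep actually runs. A compact standalone illustration of that bookkeeping (not the curl code; 0 stands for a session cookie with no expiry):

    #include <limits.h>
    #include <stdio.h>

    static long next_expiration = LONG_MAX;  /* earliest known expiry */

    static void maybe_sweep(long now, const long *expires, int n)
    {
      int i;
      if(now < next_expiration && next_expiration != LONG_MAX)
        return;                              /* nothing can be expired yet */
      next_expiration = LONG_MAX;            /* re-learn while sweeping */
      for(i = 0; i < n; i++) {
        if(expires[i] && expires[i] < now)
          printf("evict cookie %d\n", i);
        else if(expires[i] && expires[i] < next_expiration)
          next_expiration = expires[i];
      }
    }

    int main(void)
    {
      long jar[] = {100, 250, 0};
      maybe_sweep(150, jar, 3);   /* evicts #0, watermark becomes 250 */
      maybe_sweep(200, jar, 3);   /* skipped: 200 < 250 */
      return 0;
    }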
@@ -414,7 +415,7 @@ static void remove_expired(struct CookieInfo *cookies)
}
else {
/*
- * If this cookie has an expiration timestamp earlier than what we've
+ * If this cookie has an expiration timestamp earlier than what we have
* seen so far then record it for the next round of expirations.
*/
if(co->expires && co->expires < cookies->next_expiration)
@@ -473,7 +474,7 @@ static int invalid_octets(const char *p)
* Curl_cookie_add
*
* Add a single cookie line to the cookie keeping object. Be aware that
- * sometimes we get an IP-only host name, and that might also be a numerical
+ * sometimes we get an IP-only hostname, and that might also be a numerical
* IPv6 address.
*
* Returns NULL on out of memory or invalid cookie. This is suboptimal,
@@ -509,7 +510,7 @@ Curl_cookie_add(struct Curl_easy *data,
/* First, alloc and init a new struct for it */
co = calloc(1, sizeof(struct Cookie));
if(!co)
- return NULL; /* bail out if we're this low on memory */
+ return NULL; /* bail out if we are this low on memory */
if(httpheader) {
/* This line was read off an HTTP-header */
@@ -647,7 +648,7 @@ Curl_cookie_add(struct Curl_easy *data,
else if((nlen == 8) && strncasecompare("httponly", namep, 8))
co->httponly = TRUE;
else if(sep)
- /* there was a '=' so we're not done parsing this field */
+ /* there was a '=' so we are not done parsing this field */
done = FALSE;
}
if(done)
@@ -681,9 +682,9 @@ Curl_cookie_add(struct Curl_easy *data,
#ifndef USE_LIBPSL
/*
- * Without PSL we don't know when the incoming cookie is set on a
+ * Without PSL we do not know when the incoming cookie is set on a
* TLD or otherwise "protected" suffix. To reduce risk, we require a
- * dot OR the exact host name being "localhost".
+ * dot OR the exact hostname being "localhost".
*/
if(bad_domain(valuep, vlen))
domain = ":";
@@ -721,10 +722,10 @@ Curl_cookie_add(struct Curl_easy *data,
/*
* Defined in RFC2109:
*
- * Optional. The Max-Age attribute defines the lifetime of the
- * cookie, in seconds. The delta-seconds value is a decimal non-
- * negative integer. After delta-seconds seconds elapse, the
- * client should discard the cookie. A value of zero means the
+ * Optional. The Max-Age attribute defines the lifetime of the
+ * cookie, in seconds. The delta-seconds value is a decimal non-
+ * negative integer. After delta-seconds seconds elapse, the
+ * client should discard the cookie. A value of zero means the
* cookie should be discarded immediately.
*/
CURLofft offt;
@@ -780,7 +781,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
- * Else, this is the second (or more) name we don't know about!
+ * Else, this is the second (or more) name we do not know about!
*/
}
else {
@@ -806,7 +807,7 @@ Curl_cookie_add(struct Curl_easy *data,
if(!badcookie && !co->path && path) {
/*
- * No path was given in the header line, set the default. Note that the
+ * No path was given in the header line, set the default. Note that the
* passed-in path to this function MAY have a '?' and following part that
* MUST NOT be stored as part of the path.
*/
@@ -835,7 +836,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
- * If we didn't get a cookie name, or a bad one, the this is an illegal
+ * If we did not get a cookie name, or a bad one, then this is an illegal
* line so bail out.
*/
if(badcookie || !co->name) {
@@ -868,7 +869,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
if(lineptr[0]=='#') {
- /* don't even try the comments */
+ /* do not even try the comments */
free(co);
return NULL;
}
@@ -908,7 +909,7 @@ Curl_cookie_add(struct Curl_easy *data,
case 2:
/* The file format allows the path field to remain not filled in */
if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
- /* only if the path doesn't look like a boolean option! */
+ /* only if the path does not look like a boolean option! */
co->path = strdup(ptr);
if(!co->path)
badcookie = TRUE;
@@ -920,7 +921,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
break;
}
- /* this doesn't look like a path, make one up! */
+ /* this does not look like a path, make one up! */
co->path = strdup("/");
if(!co->path)
badcookie = TRUE;
@@ -1003,7 +1004,7 @@ Curl_cookie_add(struct Curl_easy *data,
if(!c->running && /* read from a file */
c->newsession && /* clean session cookies */
- !co->expires) { /* this is a session cookie since it doesn't expire! */
+ !co->expires) { /* this is a session cookie since it does not expire! */
freecookie(co);
return NULL;
}
@@ -1024,7 +1025,7 @@ Curl_cookie_add(struct Curl_easy *data,
#ifdef USE_LIBPSL
/*
* Check if the domain is a Public Suffix and if yes, ignore the cookie. We
- * must also check that the data handle isn't NULL since the psl code will
+ * must also check that the data handle is not NULL since the psl code will
* dereference it.
*/
if(data && (domain && co->domain && !Curl_host_is_ipnum(co->domain))) {
@@ -1124,10 +1125,10 @@ Curl_cookie_add(struct Curl_easy *data,
if(replace_old && !co->livecookie && clist->livecookie) {
/*
- * Both cookies matched fine, except that the already present cookie is
- * "live", which means it was set from a header, while the new one was
- * read from a file and thus isn't "live". "live" cookies are preferred
- * so the new cookie is freed.
+ * Both cookies matched fine, except that the already present cookie
+ * is "live", which means it was set from a header, while the new one
+ * was read from a file and thus is not "live". "live" cookies are
+ * preferred so the new cookie is freed.
*/
freecookie(co);
return NULL;
@@ -1178,7 +1179,7 @@ Curl_cookie_add(struct Curl_easy *data,
}
/*
- * Now that we've added a new cookie to the jar, update the expiration
+ * Now that we have added a new cookie to the jar, update the expiration
* tracker in case it is the next one to expire.
*/
if(co->expires && (co->expires < c->next_expiration))
@@ -1211,12 +1212,12 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
FILE *handle = NULL;
if(!inc) {
- /* we didn't get a struct, create one */
+ /* we did not get a struct, create one */
c = calloc(1, sizeof(struct CookieInfo));
if(!c)
return NULL; /* failed to get memory */
/*
- * Initialize the next_expiration time to signal that we don't have enough
+ * Initialize the next_expiration time to signal that we do not have enough
* information yet.
*/
c->next_expiration = CURL_OFF_T_MAX;
@@ -1271,7 +1272,7 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
}
data->state.cookie_engine = TRUE;
}
- c->running = TRUE; /* now, we're running */
+ c->running = TRUE; /* now, we are running */
return c;
}
@@ -1367,7 +1368,7 @@ fail:
* should send to the server if used now. The secure boolean informs the cookie
* if a secure connection is achieved or not.
*
- * It shall only return cookies that haven't expired.
+ * It shall only return cookies that have not expired.
*/
struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
struct CookieInfo *c,
@@ -1393,7 +1394,7 @@ struct Cookie *Curl_cookie_getlist(struct Curl_easy *data,
co = c->cookies[myhash];
while(co) {
- /* if the cookie requires we're secure we must only continue if we are! */
+ /* if the cookie requires we are secure we must only continue if we are! */
if(co->secure?secure:TRUE) {
/* now check if the domain is correct */
@@ -1605,7 +1606,7 @@ static char *get_netscape_format(const struct Cookie *co)
* cookie_output()
*
* Writes all internally known cookies to the specified file. Specify
- * "-" as file name to write to stdout.
+ * "-" as filename to write to stdout.
*
* The function returns non-zero on write failure.
*/
diff --git a/libs/libcurl/src/cookie.h b/libs/libcurl/src/cookie.h
index e936625af8..3e36987a8d 100644
--- a/libs/libcurl/src/cookie.h
+++ b/libs/libcurl/src/cookie.h
@@ -75,7 +75,7 @@ struct CookieInfo {
/** Limits for INCOMING cookies **/
-/* The longest we allow a line to be when reading a cookie from a HTTP header
+/* The longest we allow a line to be when reading a cookie from an HTTP header
or from a cookie jar */
#define MAX_COOKIE_LINE 5000
diff --git a/libs/libcurl/src/curl_addrinfo.c b/libs/libcurl/src/curl_addrinfo.c
index 949482ff54..69437b70fd 100644
--- a/libs/libcurl/src/curl_addrinfo.c
+++ b/libs/libcurl/src/curl_addrinfo.c
@@ -95,7 +95,7 @@ Curl_freeaddrinfo(struct Curl_addrinfo *cahead)
* the only difference that instead of returning a linked list of
* addrinfo structs this one returns a linked list of Curl_addrinfo
* ones. The memory allocated by this function *MUST* be free'd with
- * Curl_freeaddrinfo(). For each successful call to this function
+ * Curl_freeaddrinfo(). For each successful call to this function
* there must be an associated call later to Curl_freeaddrinfo().
*
* There should be no single call to system's getaddrinfo() in the
@@ -221,7 +221,7 @@ Curl_getaddrinfo_ex(const char *nodename,
* stack, but usable also for IPv4, all hosts and environments.
*
* The memory allocated by this function *MUST* be free'd later on calling
- * Curl_freeaddrinfo(). For each successful call to this function there
+ * Curl_freeaddrinfo(). For each successful call to this function there
* must be an associated call later to Curl_freeaddrinfo().
*
* Curl_addrinfo defined in "lib/curl_addrinfo.h"
@@ -317,7 +317,11 @@ Curl_he2ai(const struct hostent *he, int port)
addr = (void *)ai->ai_addr; /* storage area for this info */
memcpy(&addr->sin_addr, curr, sizeof(struct in_addr));
+#ifdef __MINGW32__
+ addr->sin_family = (short)(he->h_addrtype);
+#else
addr->sin_family = (CURL_SA_FAMILY_T)(he->h_addrtype);
+#endif
addr->sin_port = htons((unsigned short)port);
break;
@@ -326,7 +330,11 @@ Curl_he2ai(const struct hostent *he, int port)
addr6 = (void *)ai->ai_addr; /* storage area for this info */
memcpy(&addr6->sin6_addr, curr, sizeof(struct in6_addr));
+#ifdef __MINGW32__
+ addr6->sin6_family = (short)(he->h_addrtype);
+#else
addr6->sin6_family = (CURL_SA_FAMILY_T)(he->h_addrtype);
+#endif
addr6->sin6_port = htons((unsigned short)port);
break;
#endif
@@ -359,7 +367,7 @@ struct namebuff {
/*
* Curl_ip2addr()
*
- * This function takes an internet address, in binary form, as input parameter
+ * This function takes an Internet address, in binary form, as input parameter
* along with its address family and the string version of the address, and it
* returns a Curl_addrinfo chain filled in correctly with information for the
* given address/host
@@ -511,7 +519,7 @@ struct Curl_addrinfo *Curl_unix2addr(const char *path, bool *longpath,
*
* This is strictly for memory tracing and are using the same style as the
* family otherwise present in memdebug.c. I put these ones here since they
- * require a bunch of structs I didn't want to include in memdebug.c
+ * require a bunch of structs I did not want to include in memdebug.c
*/
void
@@ -535,7 +543,7 @@ curl_dbg_freeaddrinfo(struct addrinfo *freethis,
*
* This is strictly for memory tracing and are using the same style as the
* family otherwise present in memdebug.c. I put these ones here since they
- * require a bunch of structs I didn't want to include in memdebug.c
+ * require a bunch of structs I did not want to include in memdebug.c
*/
int
diff --git a/libs/libcurl/src/curl_addrinfo.h b/libs/libcurl/src/curl_addrinfo.h
index b938a98bed..2bbe1c2b89 100644
--- a/libs/libcurl/src/curl_addrinfo.h
+++ b/libs/libcurl/src/curl_addrinfo.h
@@ -44,9 +44,9 @@
/*
* Curl_addrinfo is our internal struct definition that we use to allow
- * consistent internal handling of this data. We use this even when the
- * system provides an addrinfo structure definition. And we use this for
- * all sorts of IPv4 and IPV6 builds.
+ * consistent internal handling of this data. We use this even when the system
+ * provides an addrinfo structure definition. We use this for all sorts of
+ * IPv4 and IPV6 builds.
*/
struct Curl_addrinfo {
diff --git a/libs/libcurl/src/curl_config.h.cmake b/libs/libcurl/src/curl_config.h.cmake
index ef7e308e0d..c7ee19b047 100644
--- a/libs/libcurl/src/curl_config.h.cmake
+++ b/libs/libcurl/src/curl_config.h.cmake
@@ -21,7 +21,7 @@
* SPDX-License-Identifier: curl
*
***************************************************************************/
-/* lib/curl_config.h.in. Generated somehow by cmake. */
+/* lib/curl_config.h.in. Generated somehow by cmake. */
/* Location of default ca bundle */
#cmakedefine CURL_CA_BUNDLE "${CURL_CA_BUNDLE}"
@@ -368,12 +368,6 @@
/* Define to 1 if you have the idn2.h header file. */
#cmakedefine HAVE_IDN2_H 1
-/* Define to 1 if you have the `socket' library (-lsocket). */
-#cmakedefine HAVE_LIBSOCKET 1
-
-/* Define to 1 if you have the `ssh2' library (-lssh2). */
-#cmakedefine HAVE_LIBSSH2 1
-
/* if zlib is available */
#cmakedefine HAVE_LIBZ 1
@@ -419,6 +413,9 @@
/* Define to 1 if you have the `pipe' function. */
#cmakedefine HAVE_PIPE 1
+/* Define to 1 if you have the `eventfd' function. */
+#cmakedefine HAVE_EVENTFD 1
+
/* If you have a fine poll */
#cmakedefine HAVE_POLL_FINE 1
@@ -539,6 +536,9 @@
/* Define to 1 if you have the timeval struct. */
#cmakedefine HAVE_STRUCT_TIMEVAL 1
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+#cmakedefine HAVE_SYS_EVENTFD_H 1
+
/* Define to 1 if you have the <sys/filio.h> header file. */
#cmakedefine HAVE_SYS_FILIO_H 1
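The two new checks are normally consumed together as a feature guard. A minimal sketch of the kind of conditional code they enable, assuming a Linux-style eventfd(2) with a fallback elsewhere:

    #include <stdio.h>
    #if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
    #include <sys/eventfd.h>
    #endif

    int main(void)
    {
    #if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
      /* single fd usable as a wakeup signal for poll()-based loops */
      int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
      printf("eventfd wakeup fd: %d\n", fd);
    #else
      printf("no eventfd; a socketpair/pipe fallback would be used\n");
    #endif
      return 0;
    }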
@@ -692,16 +692,22 @@ ${SIZEOF_TIME_T_CODE}
/* if BearSSL is enabled */
#cmakedefine USE_BEARSSL 1
-/* if WolfSSL is enabled */
+/* if wolfSSL is enabled */
#cmakedefine USE_WOLFSSL 1
-/* if libSSH is in use */
+/* if wolfSSL has the wolfSSL_DES_ecb_encrypt function. */
+#cmakedefine HAVE_WOLFSSL_DES_ECB_ENCRYPT 1
+
+/* if wolfSSL has the wolfSSL_BIO_set_shutdown function. */
+#cmakedefine HAVE_WOLFSSL_FULL_BIO 1
+
+/* if libssh is in use */
#cmakedefine USE_LIBSSH 1
-/* if libSSH2 is in use */
+/* if libssh2 is in use */
#cmakedefine USE_LIBSSH2 1
-/* if libPSL is in use */
+/* if libpsl is in use */
#cmakedefine USE_LIBPSL 1
/* if you want to use OpenLDAP code instead of legacy ldap implementation */
@@ -713,7 +719,10 @@ ${SIZEOF_TIME_T_CODE}
/* if librtmp/rtmpdump is in use */
#cmakedefine USE_LIBRTMP 1
-/* Define to 1 if you don't want the OpenSSL configuration to be loaded
+/* if GSASL is in use */
+#cmakedefine USE_GSASL 1
+
+/* Define to 1 if you do not want the OpenSSL configuration to be loaded
automatically */
#cmakedefine CURL_DISABLE_OPENSSL_AUTO_LOAD_CONFIG 1
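
The HAVE_EVENTFD and HAVE_SYS_EVENTFD_H probes added to this template are new in this release; presumably they let the library use an eventfd as its internal wakeup descriptor where the platform offers one, instead of a socket pair. As a minimal sketch (plain Linux APIs, not curl code), the wakeup pattern such a probe enables looks like this:

    /* Sketch only: wake a poll() loop through an eventfd.
       Assumes Linux with <sys/eventfd.h>; not taken from curl. */
    #include <sys/eventfd.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
      int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
      struct pollfd pfd;
      eventfd_t val;

      if(efd == -1)
        return 1;

      eventfd_write(efd, 1);          /* another thread would do this */

      pfd.fd = efd;
      pfd.events = POLLIN;
      if(poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLIN)) {
        eventfd_read(efd, &val);      /* drain the counter */
        printf("woken, counter=%llu\n", (unsigned long long)val);
      }
      close(efd);
      return 0;
    }
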
diff --git a/libs/libcurl/src/curl_config.h.in b/libs/libcurl/src/curl_config.h.in
index c85b2f1b25..af491b43b1 100644
--- a/libs/libcurl/src/curl_config.h.in
+++ b/libs/libcurl/src/curl_config.h.in
@@ -218,6 +218,9 @@
/* Define to 1 if you have the <err.h> header file. */
#undef HAVE_ERR_H
+/* Define to 1 if you have the `eventfd' function. */
+#undef HAVE_EVENTFD
+
/* Define to 1 if you have the fcntl function. */
#undef HAVE_FCNTL
@@ -402,7 +405,7 @@
/* Define to 1 if you have the <libpsl.h> header file. */
#undef HAVE_LIBPSL_H
-/* Define to 1 if using libressl. */
+/* Define to 1 if using LibreSSL. */
#undef HAVE_LIBRESSL
/* Define to 1 if you have the <librtmp/rtmp.h> header file. */
@@ -669,6 +672,9 @@
/* Define to 1 if suseconds_t is an available type. */
#undef HAVE_SUSECONDS_T
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+#undef HAVE_SYS_EVENTFD_H
+
/* Define to 1 if you have the <sys/filio.h> header file. */
#undef HAVE_SYS_FILIO_H
@@ -869,10 +875,10 @@
/* if librtmp is in use */
#undef USE_LIBRTMP
-/* if libSSH is in use */
+/* if libssh is in use */
#undef USE_LIBSSH
-/* if libSSH2 is in use */
+/* if libssh2 is in use */
#undef USE_LIBSSH2
/* If you want to build curl with the built-in manual */
diff --git a/libs/libcurl/src/curl_des.c b/libs/libcurl/src/curl_des.c
index d8df054017..ce568e6e13 100644
--- a/libs/libcurl/src/curl_des.c
+++ b/libs/libcurl/src/curl_des.c
@@ -36,7 +36,7 @@
* Curl_des_set_odd_parity()
*
* This is used to apply odd parity to the given byte array. It is typically
- * used by when a cryptography engine doesn't have its own version.
+ * used by when a cryptography engine does not have its own version.
*
* The function is a port of the Java based oddParity() function over at:
*
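
The routine described above forces every byte of a DES key to odd parity, meaning each byte must contain an odd number of set bits, with bit 0 acting as the parity bit. A stand-alone sketch of that operation (not curl's port of the Java oddParity() code):

    /* Sketch: give each key byte odd parity by adjusting its low bit. */
    #include <stdio.h>

    static void set_odd_parity(unsigned char *bytes, size_t len)
    {
      size_t i;
      for(i = 0; i < len; i++) {
        int ones = 0;
        int bit;
        for(bit = 1; bit < 8; bit++)            /* count the 7 key bits */
          ones += (bytes[i] >> bit) & 1;
        /* set or clear bit 0 so the total number of set bits is odd */
        bytes[i] = (unsigned char)((bytes[i] & 0xFE) | ((ones & 1) ? 0 : 1));
      }
    }

    int main(void)
    {
      unsigned char key[8] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
      size_t i;
      set_odd_parity(key, sizeof(key));
      for(i = 0; i < sizeof(key); i++)
        printf("%02x ", key[i]);                /* 01 01 02 02 04 04 07 07 */
      printf("\n");
      return 0;
    }
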
diff --git a/libs/libcurl/src/curl_endian.c b/libs/libcurl/src/curl_endian.c
index dbb4984d0f..d5c76c216e 100644
--- a/libs/libcurl/src/curl_endian.c
+++ b/libs/libcurl/src/curl_endian.c
@@ -30,7 +30,7 @@
* Curl_read16_le()
*
* This function converts a 16-bit integer from the little endian format, as
- * used in the incoming package to whatever endian format we're using
+ * used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
@@ -49,7 +49,7 @@ unsigned short Curl_read16_le(const unsigned char *buf)
* Curl_read32_le()
*
* This function converts a 32-bit integer from the little endian format, as
- * used in the incoming package to whatever endian format we're using
+ * used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
@@ -68,7 +68,7 @@ unsigned int Curl_read32_le(const unsigned char *buf)
* Curl_read16_be()
*
* This function converts a 16-bit integer from the big endian format, as
- * used in the incoming package to whatever endian format we're using
+ * used in the incoming package to whatever endian format we are using
* natively.
*
* Parameters:
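
The helpers touched above read integers from a byte buffer in a declared byte order, regardless of the host's native endianness. A stand-alone equivalent of the three readers mentioned in these hunks (not the curl sources):

    /* Sketch of endian-independent reads in the style of Curl_read16_le(),
       Curl_read32_le() and Curl_read16_be(). */
    #include <stdio.h>

    static unsigned short read16_le(const unsigned char *buf)
    {
      return (unsigned short)(buf[0] | (buf[1] << 8));
    }

    static unsigned int read32_le(const unsigned char *buf)
    {
      return (unsigned int)buf[0] | ((unsigned int)buf[1] << 8) |
             ((unsigned int)buf[2] << 16) | ((unsigned int)buf[3] << 24);
    }

    static unsigned short read16_be(const unsigned char *buf)
    {
      return (unsigned short)((buf[0] << 8) | buf[1]);
    }

    int main(void)
    {
      const unsigned char buf[4] = {0x78, 0x56, 0x34, 0x12};
      printf("%04x %08x %04x\n",
             read16_le(buf), read32_le(buf), read16_be(buf));
      /* prints 5678 12345678 7856 */
      return 0;
    }
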
diff --git a/libs/libcurl/src/curl_fnmatch.c b/libs/libcurl/src/curl_fnmatch.c
index a6dcb9be68..a35b0d5abc 100644
--- a/libs/libcurl/src/curl_fnmatch.c
+++ b/libs/libcurl/src/curl_fnmatch.c
@@ -80,7 +80,7 @@ static int parsekeyword(unsigned char **pattern, unsigned char *charset)
unsigned char *p = *pattern;
bool found = FALSE;
for(i = 0; !found; i++) {
- char c = *p++;
+ char c = (char)*p++;
if(i >= KEYLEN)
return SETCHARSET_FAIL;
switch(state) {
diff --git a/libs/libcurl/src/curl_gethostname.c b/libs/libcurl/src/curl_gethostname.c
index c42482e478..c207e7bd8f 100644
--- a/libs/libcurl/src/curl_gethostname.c
+++ b/libs/libcurl/src/curl_gethostname.c
@@ -28,14 +28,14 @@
/*
* Curl_gethostname() is a wrapper around gethostname() which allows
- * overriding the host name that the function would normally return.
+ * overriding the hostname that the function would normally return.
* This capability is used by the test suite to verify exact matching
* of NTLM authentication, which exercises libcurl's MD4 and DES code
* as well as by the SMTP module when a hostname is not provided.
*
- * For libcurl debug enabled builds host name overriding takes place
+ * For libcurl debug enabled builds hostname overriding takes place
* when environment variable CURL_GETHOSTNAME is set, using the value
- * held by the variable to override returned host name.
+ * held by the variable to override returned hostname.
*
* Note: The function always returns the un-qualified hostname rather
* than being provider dependent.
@@ -45,7 +45,7 @@
* mechanism which intercepts, and might override, the gethostname()
* function call. In this case a given platform must support the
* LD_PRELOAD mechanism and additionally have environment variable
- * CURL_GETHOSTNAME set in order to override the returned host name.
+ * CURL_GETHOSTNAME set in order to override the returned hostname.
*
* For libcurl static library release builds no overriding takes place.
*/
@@ -65,7 +65,7 @@ int Curl_gethostname(char * const name, GETHOSTNAME_TYPE_ARG2 namelen)
#ifdef DEBUGBUILD
- /* Override host name when environment variable CURL_GETHOSTNAME is set */
+ /* Override hostname when environment variable CURL_GETHOSTNAME is set */
const char *force_hostname = getenv("CURL_GETHOSTNAME");
if(force_hostname) {
strncpy(name, force_hostname, namelen - 1);
diff --git a/libs/libcurl/src/curl_multibyte.h b/libs/libcurl/src/curl_multibyte.h
index e1f6a81451..814d900c09 100644
--- a/libs/libcurl/src/curl_multibyte.h
+++ b/libs/libcurl/src/curl_multibyte.h
@@ -49,9 +49,10 @@ char *curlx_convert_wchar_to_UTF8(const wchar_t *str_w);
* Allocated memory should be free'd with curlx_unicodefree().
*
* Note: Because these are curlx functions their memory usage is not tracked
- * by the curl memory tracker memdebug. You'll notice that curlx function-like
- * macros call free and strdup in parentheses, eg (strdup)(ptr), and that's to
- * ensure that the curl memdebug override macros do not replace them.
+ * by the curl memory tracker memdebug. you will notice that curlx
+ * function-like macros call free and strdup in parentheses, eg (strdup)(ptr),
+ * and that is to ensure that the curl memdebug override macros do not replace
+ * them.
*/
#if defined(UNICODE) && defined(_WIN32)
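
The rewrapped comment above relies on a preprocessor rule that is easy to miss: a function-like macro only expands when its name is immediately followed by an opening parenthesis, so writing (strdup)(ptr) calls the real function even while an override macro with the same name is active. A small illustration, where the tracking macro is a made-up stand-in for curl's memdebug overrides:

    /* Sketch: why curlx code calls (free)/(strdup) in parentheses. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *tracked_strdup(const char *s, const char *file, int line)
    {
      printf("strdup tracked at %s:%d\n", file, line);
      return strdup(s);
    }

    /* a function-like override such as a memory tracker might install */
    #define strdup(p) tracked_strdup(p, __FILE__, __LINE__)

    int main(void)
    {
      char *a = strdup("hello");    /* expands to tracked_strdup(...) */
      char *b = (strdup)("world");  /* name not followed by '(': real strdup */
      free(a);
      free(b);
      return 0;
    }
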
diff --git a/libs/libcurl/src/curl_ntlm_core.c b/libs/libcurl/src/curl_ntlm_core.c
index c11b4b8a11..7d2c47dde4 100644
--- a/libs/libcurl/src/curl_ntlm_core.c
+++ b/libs/libcurl/src/curl_ntlm_core.c
@@ -110,7 +110,7 @@
#elif defined(USE_WIN32_CRYPTO)
# include <wincrypt.h>
#else
-# error "Can't compile NTLM support without a crypto library with DES."
+# error "cannot compile NTLM support without a crypto library with DES."
# define CURL_NTLM_NOT_SUPPORTED
#endif
@@ -137,20 +137,20 @@
*/
static void extend_key_56_to_64(const unsigned char *key_56, char *key)
{
- key[0] = key_56[0];
- key[1] = (unsigned char)(((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1));
- key[2] = (unsigned char)(((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2));
- key[3] = (unsigned char)(((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3));
- key[4] = (unsigned char)(((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4));
- key[5] = (unsigned char)(((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5));
- key[6] = (unsigned char)(((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6));
- key[7] = (unsigned char) ((key_56[6] << 1) & 0xFF);
+ key[0] = (char)key_56[0];
+ key[1] = (char)(((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1));
+ key[2] = (char)(((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2));
+ key[3] = (char)(((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3));
+ key[4] = (char)(((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4));
+ key[5] = (char)(((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5));
+ key[6] = (char)(((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6));
+ key[7] = (char) ((key_56[6] << 1) & 0xFF);
}
#endif
#if defined(USE_OPENSSL_DES) || defined(USE_WOLFSSL)
/*
- * Turns a 56 bit key into the 64 bit, odd parity key and sets the key. The
+ * Turns a 56-bit key into a 64-bit, odd parity key and sets the key. The
* key schedule ks is also set.
*/
static void setup_des_key(const unsigned char *key_56,
@@ -158,7 +158,7 @@ static void setup_des_key(const unsigned char *key_56,
{
DES_cblock key;
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, (char *) &key);
/* Set the key parity to odd */
@@ -175,7 +175,7 @@ static void setup_des_key(const unsigned char *key_56,
{
char key[8];
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, key);
/* Set the key parity to odd */
@@ -193,7 +193,7 @@ static bool encrypt_des(const unsigned char *in, unsigned char *out,
mbedtls_des_context ctx;
char key[8];
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, key);
/* Set the key parity to odd */
@@ -214,7 +214,7 @@ static bool encrypt_des(const unsigned char *in, unsigned char *out,
size_t out_len;
CCCryptorStatus err;
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, key);
/* Set the key parity to odd */
@@ -240,7 +240,7 @@ static bool encrypt_des(const unsigned char *in, unsigned char *out,
ctl.Func_ID = ENCRYPT_ONLY;
ctl.Data_Len = sizeof(key);
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, ctl.Crypto_Key);
/* Set the key parity to odd */
@@ -278,7 +278,7 @@ static bool encrypt_des(const unsigned char *in, unsigned char *out,
blob.hdr.aiKeyAlg = CALG_DES;
blob.len = sizeof(blob.key);
- /* Expand the 56-bit key to 64-bits */
+ /* Expand the 56-bit key to 64 bits */
extend_key_56_to_64(key_56, blob.key);
/* Set the key parity to odd */
@@ -466,13 +466,13 @@ static void time2filetime(struct ms_filetime *ft, time_t t)
unsigned int r, s;
unsigned int i;
- ft->dwLowDateTime = t & 0xFFFFFFFF;
+ ft->dwLowDateTime = (unsigned int)t & 0xFFFFFFFF;
ft->dwHighDateTime = 0;
# ifndef HAVE_TIME_T_UNSIGNED
/* Extend sign if needed. */
if(ft->dwLowDateTime & 0x80000000)
- ft->dwHighDateTime = ~0;
+ ft->dwHighDateTime = ~(unsigned int)0;
# endif
/* Bias seconds to Jan 1, 1601.
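
The casts added in extend_key_56_to_64() only silence conversion warnings; the bit layout is unchanged: the 56 key bits are redistributed so that each of the eight output bytes carries seven consecutive key bits in bits 7..1, and bit 0 of every byte is later overwritten by the odd-parity step. A stand-alone sketch using the same shifts as the hunk above:

    /* Sketch of the 56-bit to 64-bit DES key expansion shown above. */
    #include <stdio.h>

    static void expand_56_to_64(const unsigned char *key_56, unsigned char *key)
    {
      key[0] = key_56[0];
      key[1] = (unsigned char)(((key_56[0] << 7) & 0xFF) | (key_56[1] >> 1));
      key[2] = (unsigned char)(((key_56[1] << 6) & 0xFF) | (key_56[2] >> 2));
      key[3] = (unsigned char)(((key_56[2] << 5) & 0xFF) | (key_56[3] >> 3));
      key[4] = (unsigned char)(((key_56[3] << 4) & 0xFF) | (key_56[4] >> 4));
      key[5] = (unsigned char)(((key_56[4] << 3) & 0xFF) | (key_56[5] >> 5));
      key[6] = (unsigned char)(((key_56[5] << 2) & 0xFF) | (key_56[6] >> 6));
      key[7] = (unsigned char)((key_56[6] << 1) & 0xFF);
    }

    int main(void)
    {
      const unsigned char key56[7] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD};
      unsigned char key64[8];
      size_t i;
      expand_56_to_64(key56, key64);
      for(i = 0; i < 8; i++)
        printf("%02x ", key64[i]);
      printf("\n");
      return 0;
    }
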
diff --git a/libs/libcurl/src/curl_ntlm_core.h b/libs/libcurl/src/curl_ntlm_core.h
index 2df668c42f..40b191f5f5 100644
--- a/libs/libcurl/src/curl_ntlm_core.h
+++ b/libs/libcurl/src/curl_ntlm_core.h
@@ -28,13 +28,6 @@
#if defined(USE_CURL_NTLM_CORE)
-#if defined(USE_OPENSSL)
-# include <openssl/ssl.h>
-#elif defined(USE_WOLFSSL)
-# include <wolfssl/options.h>
-# include <wolfssl/openssl/ssl.h>
-#endif
-
/* Helpers to generate function byte arguments in little endian order */
#define SHORTPAIR(x) ((int)((x) & 0xff)), ((int)(((x) >> 8) & 0xff))
#define LONGQUARTET(x) ((int)((x) & 0xff)), ((int)(((x) >> 8) & 0xff)), \
diff --git a/libs/libcurl/src/curl_rtmp.c b/libs/libcurl/src/curl_rtmp.c
index 38b6646404..a8e5547d1d 100644
--- a/libs/libcurl/src/curl_rtmp.c
+++ b/libs/libcurl/src/curl_rtmp.c
@@ -236,7 +236,7 @@ static CURLcode rtmp_connect(struct Curl_easy *data, bool *done)
r->m_sb.sb_socket = (int)conn->sock[FIRSTSOCKET];
- /* We have to know if it's a write before we send the
+ /* We have to know if it is a write before we send the
* connect request packet
*/
if(data->state.upload)
@@ -273,10 +273,10 @@ static CURLcode rtmp_do(struct Curl_easy *data, bool *done)
if(data->state.upload) {
Curl_pgrsSetUploadSize(data, data->state.infilesize);
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
}
else
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
*done = TRUE;
return CURLE_OK;
}
diff --git a/libs/libcurl/src/curl_sasl.c b/libs/libcurl/src/curl_sasl.c
index c03fc0863d..729e192f41 100644
--- a/libs/libcurl/src/curl_sasl.c
+++ b/libs/libcurl/src/curl_sasl.c
@@ -328,7 +328,7 @@ bool Curl_sasl_can_authenticate(struct SASL *sasl, struct Curl_easy *data)
if(data->state.aptr.user)
return TRUE;
- /* EXTERNAL can authenticate without a user name and/or password */
+ /* EXTERNAL can authenticate without a username and/or password */
if(sasl->authmechs & sasl->prefmech & SASL_MECH_EXTERNAL)
return TRUE;
diff --git a/libs/libcurl/src/curl_setup.h b/libs/libcurl/src/curl_setup.h
index 974504a38b..8d43434b4e 100644
--- a/libs/libcurl/src/curl_setup.h
+++ b/libs/libcurl/src/curl_setup.h
@@ -40,6 +40,43 @@
#include <_mingw.h>
#endif
+/* Workaround for Homebrew gcc 12.4.0, 13.3.0, 14.1.0 and newer (as of 14.1.0)
+ that started advertising the `availability` attribute, which then gets used
+ by Apple SDK, but, in a way incompatible with gcc, resulting in a misc
+ errors inside SDK headers, e.g.:
+ error: attributes should be specified before the declarator in a function
+ definition
+ error: expected ',' or '}' before
+ Followed by missing declarations.
+ Fix it by overriding the built-in feature-check macro used by the headers
+ to enable the problematic attributes. This makes the feature check fail. */
+#if defined(__APPLE__) && \
+ !defined(__clang__) && \
+ defined(__GNUC__) && __GNUC__ >= 12 && \
+ defined(__has_attribute)
+#define availability curl_pp_attribute_disabled
+#endif
+
+#if defined(__APPLE__)
+#include <sys/types.h>
+#include <TargetConditionals.h>
+/* Fixup faulty target macro initialization in macOS SDK since v14.4 (as of
+ 15.0 beta). The SDK target detection in `TargetConditionals.h` correctly
+ detects macOS, but fails to set the macro's old name `TARGET_OS_OSX`, then
+ continues to set it to a default value of 0. Other parts of the SDK still
+ rely on the old name, and with this inconsistency our builds fail due to
+ missing declarations. It happens when using mainline llvm older than v18.
+ Later versions fixed it by predefining these target macros, avoiding the
+ faulty dynamic detection. gcc is not affected (for now) because it lacks
+ the necessary dynamic detection features, so the SDK falls back to
+ a codepath that sets both the old and new macro to 1. */
+#if defined(TARGET_OS_MAC) && TARGET_OS_MAC && \
+ defined(TARGET_OS_OSX) && !TARGET_OS_OSX
+#undef TARGET_OS_OSX
+#define TARGET_OS_OSX TARGET_OS_MAC
+#endif
+#endif
+
/*
* Disable Visual Studio warnings:
* 4127 "conditional expression is constant"
@@ -50,7 +87,7 @@
#ifdef _WIN32
/*
- * Don't include unneeded stuff in Windows headers to avoid compiler
+ * Do not include unneeded stuff in Windows headers to avoid compiler
* warnings and macro clashes.
* Make sure to define this macro before including any Windows headers.
*/
@@ -60,6 +97,16 @@
# ifndef NOGDI
# define NOGDI
# endif
+/* Detect Windows App environment which has a restricted access
+ * to the Win32 APIs. */
+# if (defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0602)) || \
+ defined(WINAPI_FAMILY)
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define CURL_WINDOWS_APP
+# endif
+# endif
#endif
/* Compatibility */
@@ -296,13 +343,25 @@
#define CURL_PRINTF(fmt, arg)
#endif
+/* Workaround for mainline llvm v16 and earlier missing a built-in macro
+ expected by macOS SDK v14 / Xcode v15 (2023) and newer.
+ gcc (as of v14) is also missing it. */
+#if defined(__APPLE__) && \
+ ((!defined(__apple_build_version__) && \
+ defined(__clang__) && __clang_major__ < 17) || \
+ (defined(__GNUC__) && __GNUC__ <= 14)) && \
+ defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ !defined(__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__)
+#define __ENVIRONMENT_OS_VERSION_MIN_REQUIRED__ \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
+#endif
+
/*
* Use getaddrinfo to resolve the IPv4 address literal. If the current network
- * interface doesn't support IPv4, but supports IPv6, NAT64, and DNS64,
+ * interface does not support IPv4, but supports IPv6, NAT64, and DNS64,
* performing this task will result in a synthesized IPv6 address.
*/
#if defined(__APPLE__) && !defined(USE_ARES)
-#include <TargetConditionals.h>
#define USE_RESOLVE_ON_IPS 1
# if TARGET_OS_MAC && !(defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) && \
defined(USE_IPV6)
@@ -437,7 +496,7 @@
#endif
#ifndef SIZEOF_TIME_T
-/* assume default size of time_t to be 32 bit */
+/* assume default size of time_t to be 32 bits */
#define SIZEOF_TIME_T 4
#endif
@@ -460,7 +519,7 @@
#endif
/*
- * Default sizeof(off_t) in case it hasn't been defined in config file.
+ * Default sizeof(off_t) in case it has not been defined in config file.
*/
#ifndef SIZEOF_OFF_T
@@ -527,7 +586,7 @@
#endif
#ifndef SIZE_T_MAX
-/* some limits.h headers have this defined, some don't */
+/* some limits.h headers have this defined, some do not */
#if defined(SIZEOF_SIZE_T) && (SIZEOF_SIZE_T > 4)
#define SIZE_T_MAX 18446744073709551615U
#else
@@ -536,7 +595,7 @@
#endif
#ifndef SSIZE_T_MAX
-/* some limits.h headers have this defined, some don't */
+/* some limits.h headers have this defined, some do not */
#if defined(SIZEOF_SIZE_T) && (SIZEOF_SIZE_T > 4)
#define SSIZE_T_MAX 9223372036854775807
#else
@@ -545,7 +604,7 @@
#endif
/*
- * Arg 2 type for gethostname in case it hasn't been defined in config file.
+ * Arg 2 type for gethostname in case it has not been defined in config file.
*/
#ifndef GETHOSTNAME_TYPE_ARG2
@@ -760,7 +819,7 @@
#endif
/*
- * shutdown() flags for systems that don't define them
+ * shutdown() flags for systems that do not define them
*/
#ifndef SHUT_RD
@@ -808,7 +867,7 @@ endings either CRLF or LF so 't' is appropriate.
#define FOPEN_APPENDTEXT "a"
#endif
-/* for systems that don't detect this in configure */
+/* for systems that do not detect this in configure */
#ifndef CURL_SA_FAMILY_T
# if defined(HAVE_SA_FAMILY_T)
# define CURL_SA_FAMILY_T sa_family_t
@@ -837,7 +896,7 @@ int getpwuid_r(uid_t uid, struct passwd *pwd, char *buf,
size_t buflen, struct passwd **result);
#endif
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
#define UNITTEST
#else
#define UNITTEST static
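
The last hunk above switches the UNITTEST macro from the general DEBUGBUILD gate to a dedicated UNITTESTS define: with UNITTESTS set the macro expands to nothing, giving the marked helpers external linkage so test code (such as the unit tests referenced elsewhere in this patch) can call them; otherwise it expands to static. A tiny illustration of the pattern, with made-up names:

    /* Sketch of the UNITTEST linkage trick used above. */
    #ifdef UNITTESTS
    #define UNITTEST
    #else
    #define UNITTEST static
    #endif

    /* external symbol when built with -DUNITTESTS, file-local otherwise */
    UNITTEST int parse_digit(int c)
    {
      return (c >= '0' && c <= '9') ? (c - '0') : -1;
    }

    int main(void)
    {
      return parse_digit('7') == 7 ? 0 : 1;
    }
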
diff --git a/libs/libcurl/src/curl_setup_once.h b/libs/libcurl/src/curl_setup_once.h
index ed72008f58..2c580fc444 100644
--- a/libs/libcurl/src/curl_setup_once.h
+++ b/libs/libcurl/src/curl_setup_once.h
@@ -106,7 +106,7 @@
#endif
/*
- * Definition of timeval struct for platforms that don't have it.
+ * Definition of timeval struct for platforms that do not have it.
*/
#ifndef HAVE_STRUCT_TIMEVAL
@@ -130,7 +130,7 @@ struct timeval {
#if defined(__minix)
-/* Minix doesn't support recv on TCP sockets */
+/* Minix does not support recv on TCP sockets */
#define sread(x,y,z) (ssize_t)read((RECV_TYPE_ARG1)(x), \
(RECV_TYPE_ARG2)(y), \
(RECV_TYPE_ARG3)(z))
@@ -143,7 +143,7 @@ struct timeval {
*
* HAVE_RECV is defined if you have a function named recv()
* which is used to read incoming data from sockets. If your
- * function has another name then don't define HAVE_RECV.
+ * function has another name then do not define HAVE_RECV.
*
* If HAVE_RECV is defined then RECV_TYPE_ARG1, RECV_TYPE_ARG2,
* RECV_TYPE_ARG3, RECV_TYPE_ARG4 and RECV_TYPE_RETV must also
@@ -151,7 +151,7 @@ struct timeval {
*
* HAVE_SEND is defined if you have a function named send()
* which is used to write outgoing data on a connected socket.
- * If yours has another name then don't define HAVE_SEND.
+ * If yours has another name then do not define HAVE_SEND.
*
* If HAVE_SEND is defined then SEND_TYPE_ARG1, SEND_QUAL_ARG2,
* SEND_TYPE_ARG2, SEND_TYPE_ARG3, SEND_TYPE_ARG4 and
@@ -170,7 +170,7 @@ struct timeval {
#if defined(__minix)
-/* Minix doesn't support send on TCP sockets */
+/* Minix does not support send on TCP sockets */
#define swrite(x,y,z) (ssize_t)write((SEND_TYPE_ARG1)(x), \
(SEND_TYPE_ARG2)(y), \
(SEND_TYPE_ARG3)(z))
@@ -226,7 +226,7 @@ struct timeval {
/*
* 'bool' exists on platforms with <stdbool.h>, i.e. C99 platforms.
- * On non-C99 platforms there's no bool, so define an enum for that.
+ * On non-C99 platforms there is no bool, so define an enum for that.
* On C99 platforms 'false' and 'true' also exist. Enum uses a
* global namespace though, so use bool_false and bool_true.
*/
@@ -238,7 +238,7 @@ struct timeval {
} bool;
/*
- * Use a define to let 'true' and 'false' use those enums. There
+ * Use a define to let 'true' and 'false' use those enums. There
* are currently no use of true and false in libcurl proper, but
* there are some in the examples. This will cater for any later
* code happening to use true and false.
diff --git a/libs/libcurl/src/curl_sha512_256.c b/libs/libcurl/src/curl_sha512_256.c
index bd96af1c4e..4697ffad6a 100644
--- a/libs/libcurl/src/curl_sha512_256.c
+++ b/libs/libcurl/src/curl_sha512_256.c
@@ -270,9 +270,9 @@ Curl_sha512_256_finish(unsigned char *digest,
* ** written by Evgeny Grin (Karlson2k) for GNU libmicrohttpd. ** *
* ** The author ported the code to libcurl. The ported code is provided ** *
* ** under curl license. ** *
- * ** This is a minimal version with minimal optimisations. Performance ** *
+ * ** This is a minimal version with minimal optimizations. Performance ** *
* ** can be significantly improved. Big-endian store and load macros ** *
- * ** are obvious targets for optimisation. ** */
+ * ** are obvious targets for optimization. ** */
#ifdef __GNUC__
# if defined(__has_attribute) && defined(__STDC_VERSION__)
@@ -328,7 +328,7 @@ MHDx_rotr64(curl_uint64_t value, unsigned int bits)
bits %= 64;
if(0 == bits)
return value;
- /* Defined in a form which modern compiler could optimise. */
+ /* Defined in a form which modern compiler could optimize. */
return (value >> bits) | (value << (64 - bits));
}
@@ -474,10 +474,10 @@ MHDx_sha512_256_transform(curl_uint64_t H[SHA512_256_HASH_SIZE_WORDS],
See FIPS PUB 180-4 section 5.2.2, 6.7, 6.4. */
curl_uint64_t W[16];
- /* 'Ch' and 'Maj' macro functions are defined with widely-used optimisation.
+ /* 'Ch' and 'Maj' macro functions are defined with widely-used optimization.
See FIPS PUB 180-4 formulae 4.8, 4.9. */
-#define Ch(x,y,z) ( (z) ^ ((x) & ((y) ^ (z))) )
-#define Maj(x,y,z) ( ((x) & (y)) ^ ((z) & ((x) ^ (y))) )
+#define Sha512_Ch(x,y,z) ( (z) ^ ((x) & ((y) ^ (z))) )
+#define Sha512_Maj(x,y,z) ( ((x) & (y)) ^ ((z) & ((x) ^ (y))) )
/* Four 'Sigma' macro functions.
See FIPS PUB 180-4 formulae 4.10, 4.11, 4.12, 4.13. */
@@ -547,9 +547,9 @@ MHDx_sha512_256_transform(curl_uint64_t H[SHA512_256_HASH_SIZE_WORDS],
* Note: 'wt' must be used exactly one time in this macro as macro for
'wt' calculation may change other data as well every time when
used. */
-#define SHA2STEP64(vA,vB,vC,vD,vE,vF,vG,vH,kt,wt) do { \
- (vD) += ((vH) += SIG1 ((vE)) + Ch ((vE),(vF),(vG)) + (kt) + (wt)); \
- (vH) += SIG0 ((vA)) + Maj ((vA),(vB),(vC)); } while (0)
+#define SHA2STEP64(vA,vB,vC,vD,vE,vF,vG,vH,kt,wt) do { \
+ (vD) += ((vH) += SIG1((vE)) + Sha512_Ch((vE),(vF),(vG)) + (kt) + (wt)); \
+ (vH) += SIG0((vA)) + Sha512_Maj((vA),(vB),(vC)); } while (0)
/* One step of SHA-512/256 computation with working variables rotation,
see FIPS PUB 180-4 section 6.4.2 step 3. This macro version reassigns
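
The renames above (Ch and Maj become Sha512_Ch and Sha512_Maj) avoid clashing with identically named macros elsewhere; the operations themselves are the standard FIPS 180-4 choose and majority functions, used alongside a 64-bit rotate-right. A small stand-alone sketch with the same definitions and a quick sanity check:

    /* Sketch of the SHA-512/256 building blocks referenced above. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t rotr64(uint64_t value, unsigned int bits)
    {
      bits %= 64;
      if(bits == 0)
        return value;
      return (value >> bits) | (value << (64 - bits));
    }

    #define Sha512_Ch(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
    #define Sha512_Maj(x,y,z) (((x) & (y)) ^ ((z) & ((x) ^ (y))))

    int main(void)
    {
      /* rotating 1 right by 1 moves the set bit to the top position */
      printf("%016llx\n", (unsigned long long)rotr64(1, 1));
      /* Ch picks y where x has 1-bits and z elsewhere; Maj is the
         bitwise majority of the three inputs */
      printf("%x %x\n", (unsigned)Sha512_Ch(0xF0, 0xAA, 0x55),
             (unsigned)Sha512_Maj(0xF0, 0xAA, 0x55));   /* a5 f0 */
      return 0;
    }
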
diff --git a/libs/libcurl/src/curl_sspi.c b/libs/libcurl/src/curl_sspi.c
index 6e9a6651b7..3e83a9801e 100644
--- a/libs/libcurl/src/curl_sspi.c
+++ b/libs/libcurl/src/curl_sspi.c
@@ -134,7 +134,7 @@ void Curl_sspi_global_cleanup(void)
*
* Parameters:
*
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* identity [in/out] - The identity structure.
*
diff --git a/libs/libcurl/src/curl_threads.c b/libs/libcurl/src/curl_threads.c
index ea343a1e23..7bb22850df 100644
--- a/libs/libcurl/src/curl_threads.c
+++ b/libs/libcurl/src/curl_threads.c
@@ -100,18 +100,23 @@ int Curl_thread_join(curl_thread_t *hnd)
#elif defined(USE_THREADS_WIN32)
-/* !checksrc! disable SPACEBEFOREPAREN 1 */
-curl_thread_t Curl_thread_create(unsigned int (CURL_STDCALL *func) (void *),
+curl_thread_t Curl_thread_create(
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
+ DWORD
+#else
+ unsigned int
+#endif
+ (CURL_STDCALL *func) (void *),
void *arg)
{
-#ifdef _WIN32_WCE
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
typedef HANDLE curl_win_thread_handle_t;
#else
typedef uintptr_t curl_win_thread_handle_t;
#endif
curl_thread_t t;
curl_win_thread_handle_t thread_handle;
-#ifdef _WIN32_WCE
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
thread_handle = CreateThread(NULL, 0, func, arg, 0, NULL);
#else
thread_handle = _beginthreadex(NULL, 0, func, arg, 0, NULL);
diff --git a/libs/libcurl/src/curl_threads.h b/libs/libcurl/src/curl_threads.h
index bf7c4a6b1e..3a10c04a92 100644
--- a/libs/libcurl/src/curl_threads.h
+++ b/libs/libcurl/src/curl_threads.h
@@ -52,8 +52,13 @@
#if defined(USE_THREADS_POSIX) || defined(USE_THREADS_WIN32)
-/* !checksrc! disable SPACEBEFOREPAREN 1 */
-curl_thread_t Curl_thread_create(unsigned int (CURL_STDCALL *func) (void *),
+curl_thread_t Curl_thread_create(
+#if defined(_WIN32_WCE) || defined(CURL_WINDOWS_APP)
+ DWORD
+#else
+ unsigned int
+#endif
+ (CURL_STDCALL *func) (void *),
void *arg);
void Curl_thread_destroy(curl_thread_t hnd);
diff --git a/libs/libcurl/src/cw-out.c b/libs/libcurl/src/cw-out.c
index bf6250c519..7bce386df0 100644
--- a/libs/libcurl/src/cw-out.c
+++ b/libs/libcurl/src/cw-out.c
@@ -228,8 +228,8 @@ static CURLcode cw_out_ptr_flush(struct cw_out_ctx *ctx,
if(CURL_WRITEFUNC_PAUSE == nwritten) {
if(data->conn && data->conn->handler->flags & PROTOPT_NONETWORK) {
/* Protocols that work without network cannot be paused. This is
- actually only FILE:// just now, and it can't pause since the
- transfer isn't done using the "normal" procedure. */
+ actually only FILE:// just now, and it cannot pause since the
+ transfer is not done using the "normal" procedure. */
failf(data, "Write callback asked for PAUSE when not supported");
return CURLE_WRITE_ERROR;
}
diff --git a/libs/libcurl/src/dict.c b/libs/libcurl/src/dict.c
index 5e79b1cf3f..66cc6f300f 100644
--- a/libs/libcurl/src/dict.c
+++ b/libs/libcurl/src/dict.c
@@ -241,7 +241,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done)
failf(data, "Failed sending DICT request");
goto error;
}
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1); /* no upload */
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE); /* no upload */
}
else if(strncasecompare(path, DICT_DEFINE, sizeof(DICT_DEFINE)-1) ||
strncasecompare(path, DICT_DEFINE2, sizeof(DICT_DEFINE2)-1) ||
@@ -287,7 +287,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done)
failf(data, "Failed sending DICT request");
goto error;
}
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
}
else {
@@ -309,7 +309,7 @@ static CURLcode dict_do(struct Curl_easy *data, bool *done)
goto error;
}
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
}
}
diff --git a/libs/libcurl/src/doh.c b/libs/libcurl/src/doh.c
index 8164cf5b78..9653f32639 100644
--- a/libs/libcurl/src/doh.c
+++ b/libs/libcurl/src/doh.c
@@ -93,21 +93,21 @@ UNITTEST DOHcode doh_encode(const char *host,
const char *hostp = host;
/* The expected output length is 16 bytes more than the length of
- * the QNAME-encoding of the host name.
+ * the QNAME-encoding of the hostname.
*
* A valid DNS name may not contain a zero-length label, except at
- * the end. For this reason, a name beginning with a dot, or
+ * the end. For this reason, a name beginning with a dot, or
* containing a sequence of two or more consecutive dots, is invalid
* and cannot be encoded as a QNAME.
*
- * If the host name ends with a trailing dot, the corresponding
- * QNAME-encoding is one byte longer than the host name. If (as is
+ * If the hostname ends with a trailing dot, the corresponding
+ * QNAME-encoding is one byte longer than the hostname. If (as is
* also valid) the hostname is shortened by the omission of the
* trailing dot, then its QNAME-encoding will be two bytes longer
- * than the host name.
+ * than the hostname.
*
* Each [ label, dot ] pair is encoded as [ length, label ],
- * preserving overall length. A final [ label ] without a dot is
+ * preserving overall length. A final [ label ] without a dot is
* also encoded as [ length, label ], increasing overall length
* by one. The encoding is completed by appending a zero byte,
* representing the zero-length root label, again increasing
@@ -191,7 +191,7 @@ doh_write_cb(const void *contents, size_t size, size_t nmemb, void *userp)
return realsize;
}
-#if defined(USE_HTTPSRR) && defined(CURLDEBUG)
+#if defined(USE_HTTPSRR) && defined(DEBUGBUILD)
static void local_print_buf(struct Curl_easy *data,
const char *prefix,
unsigned char *buf, size_t len)
@@ -285,7 +285,7 @@ static CURLcode dohprobe(struct Curl_easy *data,
ERROR_CHECK_SETOPT(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
ERROR_CHECK_SETOPT(CURLOPT_PIPEWAIT, 1L);
#endif
-#ifndef CURLDEBUG
+#ifndef DEBUGBUILD
/* enforce HTTPS if not debug */
ERROR_CHECK_SETOPT(CURLOPT_PROTOCOLS, CURLPROTO_HTTPS);
#else
@@ -400,7 +400,6 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data,
int *waitp)
{
CURLcode result = CURLE_OK;
- int slot;
struct dohdata *dohp;
struct connectdata *conn = data->conn;
#ifdef USE_HTTPSRR
@@ -455,9 +454,9 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data,
* TODO: Figure out the conditions under which we want to make
* a request for an HTTPS RR when we are not doing ECH. For now,
* making this request breaks a bunch of DoH tests, e.g. test2100,
- * where the additional request doesn't match the pre-cooked data
- * files, so there's a bit of work attached to making the request
- * in a non-ECH use-case. For the present, we'll only make the
+ * where the additional request does not match the pre-cooked data
+ * files, so there is a bit of work attached to making the request
+ * in a non-ECH use-case. For the present, we will only make the
* request when ECH is enabled in the build and is being used for
* the curl operation.
*/
@@ -473,7 +472,7 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data,
result = dohprobe(data, &dohp->probe[DOH_PROBE_SLOT_HTTPS],
DNS_TYPE_HTTPS, qname, data->set.str[STRING_DOH],
data->multi, dohp->headers);
- free(qname);
+ Curl_safefree(qname);
if(result)
goto error;
dohp->pending++;
@@ -484,13 +483,7 @@ struct Curl_addrinfo *Curl_doh(struct Curl_easy *data,
return NULL;
error:
- curl_slist_free_all(dohp->headers);
- data->req.doh->headers = NULL;
- for(slot = 0; slot < DOH_PROBE_SLOTS; slot++) {
- (void)curl_multi_remove_handle(data->multi, dohp->probe[slot].easy);
- Curl_close(&dohp->probe[slot].easy);
- }
- Curl_safefree(data->req.doh);
+ Curl_doh_cleanup(data);
return NULL;
}
@@ -518,12 +511,12 @@ static DOHcode skipqname(const unsigned char *doh, size_t dohlen,
return DOH_OK;
}
-static unsigned short get16bit(const unsigned char *doh, int index)
+static unsigned short get16bit(const unsigned char *doh, unsigned int index)
{
return (unsigned short)((doh[index] << 8) | doh[index + 1]);
}
-static unsigned int get32bit(const unsigned char *doh, int index)
+static unsigned int get32bit(const unsigned char *doh, unsigned int index)
{
/* make clang and gcc optimize this to bswap by incrementing
the pointer first. */
@@ -531,7 +524,7 @@ static unsigned int get32bit(const unsigned char *doh, int index)
/* avoid undefined behavior by casting to unsigned before shifting
24 bits, possibly into the sign bit. codegen is same, but
- ub sanitizer won't be upset */
+ ub sanitizer will not be upset */
return ((unsigned)doh[0] << 24) | ((unsigned)doh[1] << 16) |
((unsigned)doh[2] << 8) | doh[3];
}
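
The two getters above only change their index parameter to unsigned; the reads themselves stay plain big-endian (network order) loads, with the first byte cast to unsigned before the 24-bit shift so the shift can never reach the sign bit. A stand-alone version of the same idea (not the curl sources):

    /* Sketch of big-endian 16/32-bit reads as used for DNS wire data. */
    #include <stdio.h>

    static unsigned short get16bit(const unsigned char *doh, unsigned int index)
    {
      return (unsigned short)((doh[index] << 8) | doh[index + 1]);
    }

    static unsigned int get32bit(const unsigned char *doh, unsigned int index)
    {
      doh += index;
      return ((unsigned)doh[0] << 24) | ((unsigned)doh[1] << 16) |
             ((unsigned)doh[2] << 8) | doh[3];
    }

    int main(void)
    {
      const unsigned char ttl[4] = {0x00, 0x00, 0x0e, 0x10};
      printf("%u %u\n", (unsigned)get16bit(ttl, 2), get32bit(ttl, 0));
      /* prints 3600 3600 */
      return 0;
    }
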
@@ -606,7 +599,7 @@ static DOHcode store_cname(const unsigned char *doh,
/* move to the new index */
newpos = (length & 0x3f) << 8 | doh[index + 1];
- index = newpos;
+ index = (unsigned int)newpos;
continue;
}
else if(length & 0xc0)
@@ -670,7 +663,7 @@ static DOHcode rdata(const unsigned char *doh,
break;
#endif
case DNS_TYPE_CNAME:
- rc = store_cname(doh, dohlen, index, d);
+ rc = store_cname(doh, dohlen, (unsigned int)index, d);
if(rc)
return rc;
break;
@@ -771,7 +764,7 @@ UNITTEST DOHcode doh_decode(const unsigned char *doh,
if(dohlen < (index + rdlength))
return DOH_DNS_OUT_OF_RANGE;
- rc = rdata(doh, dohlen, rdlength, type, index, d);
+ rc = rdata(doh, dohlen, rdlength, type, (int)index, d);
if(rc)
return rc; /* bad rdata */
index += rdlength;
@@ -870,7 +863,7 @@ static void showdoh(struct Curl_easy *data,
}
#ifdef USE_HTTPSRR
for(i = 0; i < d->numhttps_rrs; i++) {
-# ifdef CURLDEBUG
+# ifdef DEBUGBUILD
local_print_buf(data, "DoH HTTPS",
d->https_rrs[i].val, d->https_rrs[i].len);
# else
@@ -891,11 +884,11 @@ static void showdoh(struct Curl_easy *data,
*
* This function returns a pointer to the first element of a newly allocated
* Curl_addrinfo struct linked list filled with the data from a set of DoH
- * lookups. Curl_addrinfo is meant to work like the addrinfo struct does for
+ * lookups. Curl_addrinfo is meant to work like the addrinfo struct does for
* a IPv6 stack, but usable also for IPv4, all hosts and environments.
*
* The memory allocated by this function *MUST* be free'd later on calling
- * Curl_freeaddrinfo(). For each successful call to this function there
+ * Curl_freeaddrinfo(). For each successful call to this function there
* must be an associated call later to Curl_freeaddrinfo().
*/
@@ -923,7 +916,7 @@ static CURLcode doh2ai(const struct dohentry *de, const char *hostname,
CURL_SA_FAMILY_T addrtype;
if(de->addr[i].type == DNS_TYPE_AAAA) {
#ifndef USE_IPV6
- /* we can't handle IPv6 addresses */
+ /* we cannot handle IPv6 addresses */
continue;
#else
ss_size = sizeof(struct sockaddr_in6);
@@ -967,7 +960,11 @@ static CURLcode doh2ai(const struct dohentry *de, const char *hostname,
addr = (void *)ai->ai_addr; /* storage area for this info */
DEBUGASSERT(sizeof(struct in_addr) == sizeof(de->addr[i].ip.v4));
memcpy(&addr->sin_addr, &de->addr[i].ip.v4, sizeof(struct in_addr));
+#ifdef __MINGW32__
+ addr->sin_family = (short)addrtype;
+#else
addr->sin_family = addrtype;
+#endif
addr->sin_port = htons((unsigned short)port);
break;
@@ -976,7 +973,11 @@ static CURLcode doh2ai(const struct dohentry *de, const char *hostname,
addr6 = (void *)ai->ai_addr; /* storage area for this info */
DEBUGASSERT(sizeof(struct in6_addr) == sizeof(de->addr[i].ip.v6));
memcpy(&addr6->sin6_addr, &de->addr[i].ip.v6, sizeof(struct in6_addr));
+#ifdef __MINGW32__
+ addr6->sin6_family = (short)addrtype;
+#else
addr6->sin6_family = addrtype;
+#endif
addr6->sin6_port = htons((unsigned short)port);
break;
#endif
@@ -1020,7 +1021,7 @@ UNITTEST void de_cleanup(struct dohentry *d)
}
#ifdef USE_HTTPSRR
for(i = 0; i < d->numhttps_rrs; i++)
- free(d->https_rrs[i].val);
+ Curl_safefree(d->https_rrs[i].val);
#endif
}
@@ -1038,7 +1039,7 @@ UNITTEST void de_cleanup(struct dohentry *d)
*
* The input buffer pointer will be modified so it points to
* just after the end of the DNS name encoding on output. (And
- * that's why it's an "unsigned char **" :-)
+ * that is why it is an "unsigned char **" :-)
*/
static CURLcode local_decode_rdata_name(unsigned char **buf, size_t *remaining,
char **dnsname)
@@ -1097,7 +1098,7 @@ static CURLcode local_decode_rdata_alpn(unsigned char *rrval, size_t len,
* output is comma-sep list of the strings
* implementations may or may not handle quoting of comma within
* string values, so we might see a comma within the wire format
- * version of a string, in which case we'll precede that by a
+ * version of a string, in which case we will precede that by a
* backslash - same goes for a backslash character, and of course
* we need to use two backslashes in strings when we mean one;-)
*/
@@ -1143,10 +1144,10 @@ err:
return CURLE_BAD_CONTENT_ENCODING;
}
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
static CURLcode test_alpn_escapes(void)
{
- /* we'll use an example from draft-ietf-dnsop-svcb, figure 10 */
+ /* we will use an example from draft-ietf-dnsop-svcb, figure 10 */
static unsigned char example[] = {
0x08, /* length 8 */
0x66, 0x5c, 0x6f, 0x6f, 0x2c, 0x62, 0x61, 0x72, /* value "f\\oo,bar" */
@@ -1176,8 +1177,8 @@ static CURLcode Curl_doh_decode_httpsrr(unsigned char *rrval, size_t len,
struct Curl_https_rrinfo *lhrr = NULL;
char *dnsname = NULL;
-#ifdef CURLDEBUG
- /* a few tests of escaping, shouldn't be here but ok for now */
+#ifdef DEBUGBUILD
+ /* a few tests of escaping, should not be here but ok for now */
if(test_alpn_escapes() != CURLE_OK)
return CURLE_OUT_OF_MEMORY;
#endif
@@ -1209,18 +1210,24 @@ static CURLcode Curl_doh_decode_httpsrr(unsigned char *rrval, size_t len,
if(pcode == HTTPS_RR_CODE_NO_DEF_ALPN)
lhrr->no_def_alpn = TRUE;
else if(pcode == HTTPS_RR_CODE_IPV4) {
+ if(!plen)
+ goto err;
lhrr->ipv4hints = Curl_memdup(cp, plen);
if(!lhrr->ipv4hints)
goto err;
lhrr->ipv4hints_len = (size_t)plen;
}
else if(pcode == HTTPS_RR_CODE_ECH) {
+ if(!plen)
+ goto err;
lhrr->echconfiglist = Curl_memdup(cp, plen);
if(!lhrr->echconfiglist)
goto err;
lhrr->echconfiglist_len = (size_t)plen;
}
else if(pcode == HTTPS_RR_CODE_IPV6) {
+ if(!plen)
+ goto err;
lhrr->ipv6hints = Curl_memdup(cp, plen);
if(!lhrr->ipv6hints)
goto err;
@@ -1236,15 +1243,16 @@ static CURLcode Curl_doh_decode_httpsrr(unsigned char *rrval, size_t len,
return CURLE_OK;
err:
if(lhrr) {
- free(lhrr->target);
- free(lhrr->echconfiglist);
- free(lhrr->val);
- free(lhrr);
+ Curl_safefree(lhrr->target);
+ Curl_safefree(lhrr->echconfiglist);
+ Curl_safefree(lhrr->val);
+ Curl_safefree(lhrr->alpns);
+ Curl_safefree(lhrr);
}
return CURLE_OUT_OF_MEMORY;
}
-# ifdef CURLDEBUG
+# ifdef DEBUGBUILD
static void local_print_httpsrr(struct Curl_easy *data,
struct Curl_https_rrinfo *hrr)
{
@@ -1310,10 +1318,7 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data,
struct dohentry de;
int slot;
/* remove DoH handles from multi handle and close them */
- for(slot = 0; slot < DOH_PROBE_SLOTS; slot++) {
- curl_multi_remove_handle(data->multi, dohp->probe[slot].easy);
- Curl_close(&dohp->probe[slot].easy);
- }
+ Curl_doh_close(data);
/* parse the responses, create the struct and return it! */
de_init(&de);
for(slot = 0; slot < DOH_PROBE_SLOTS; slot++) {
@@ -1341,7 +1346,7 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data,
if(Curl_trc_ft_is_verbose(data, &Curl_doh_trc)) {
- infof(data, "[DoH] Host name: %s", dohp->host);
+ infof(data, "[DoH] hostname: %s", dohp->host);
showdoh(data, &de);
}
@@ -1382,7 +1387,7 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data,
return result;
}
infof(data, "Some HTTPS RR to process");
-# ifdef CURLDEBUG
+# ifdef DEBUGBUILD
local_print_httpsrr(data, hrr);
# endif
(*dnsp)->hinfo = hrr;
@@ -1400,4 +1405,32 @@ CURLcode Curl_doh_is_resolved(struct Curl_easy *data,
return CURLE_OK;
}
+void Curl_doh_close(struct Curl_easy *data)
+{
+ struct dohdata *doh = data->req.doh;
+ if(doh) {
+ size_t slot;
+ for(slot = 0; slot < DOH_PROBE_SLOTS; slot++) {
+ if(!doh->probe[slot].easy)
+ continue;
+ /* data->multi might already be reset at this time */
+ if(doh->probe[slot].easy->multi)
+ curl_multi_remove_handle(doh->probe[slot].easy->multi,
+ doh->probe[slot].easy);
+ Curl_close(&doh->probe[slot].easy);
+ }
+ }
+}
+
+void Curl_doh_cleanup(struct Curl_easy *data)
+{
+ struct dohdata *doh = data->req.doh;
+ if(doh) {
+ Curl_doh_close(data);
+ curl_slist_free_all(doh->headers);
+ data->req.doh->headers = NULL;
+ Curl_safefree(data->req.doh);
+ }
+}
+
#endif /* CURL_DISABLE_DOH */
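
Tying back to the doh_encode() comment near the top of this file's diff: each dot-separated label is written as a length byte followed by the label bytes, and a single zero byte for the root label terminates the name, which is why a leading dot or two consecutive dots cannot be encoded. A minimal encoder sketch under those rules, with only the 63-byte label limit enforced (not curl's implementation):

    /* Sketch: QNAME-encode a hostname, e.g. "www.example.com" becomes
       \3www\7example\3com\0. */
    #include <stdio.h>
    #include <string.h>

    static size_t qname_encode(const char *host, unsigned char *out,
                               size_t outlen)
    {
      size_t used = 0;
      while(*host) {
        const char *dot = strchr(host, '.');
        size_t lablen = dot ? (size_t)(dot - host) : strlen(host);
        if(lablen == 0 || lablen > 63 || used + lablen + 2 > outlen)
          return 0;              /* empty label, oversized label or no room */
        out[used++] = (unsigned char)lablen;
        memcpy(&out[used], host, lablen);
        used += lablen;
        host += lablen;
        if(*host == '.')
          host++;
      }
      if(used + 1 > outlen)
        return 0;
      out[used++] = 0;           /* zero-length root label */
      return used;
    }

    int main(void)
    {
      unsigned char buf[256];
      size_t n = qname_encode("www.example.com", buf, sizeof(buf));
      size_t i;
      for(i = 0; i < n; i++)
        printf(buf[i] < 32 ? "\\%d" : "%c", buf[i]);
      printf(" (%d bytes)\n", (int)n);
      return 0;
    }
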
diff --git a/libs/libcurl/src/doh.h b/libs/libcurl/src/doh.h
index b8b8a18ebe..a8cc480bd3 100644
--- a/libs/libcurl/src/doh.h
+++ b/libs/libcurl/src/doh.h
@@ -140,19 +140,22 @@ struct dohentry {
#endif
};
-
-#ifdef DEBUGBUILD
-DOHcode doh_encode(const char *host,
- DNStype dnstype,
- unsigned char *dnsp, /* buffer */
- size_t len, /* buffer size */
- size_t *olen); /* output length */
-DOHcode doh_decode(const unsigned char *doh,
- size_t dohlen,
- DNStype dnstype,
- struct dohentry *d);
-void de_init(struct dohentry *d);
-void de_cleanup(struct dohentry *d);
+void Curl_doh_close(struct Curl_easy *data);
+void Curl_doh_cleanup(struct Curl_easy *data);
+
+#ifdef UNITTESTS
+UNITTEST DOHcode doh_encode(const char *host,
+ DNStype dnstype,
+ unsigned char *dnsp, /* buffer */
+ size_t len, /* buffer size */
+ size_t *olen); /* output length */
+UNITTEST DOHcode doh_decode(const unsigned char *doh,
+ size_t dohlen,
+ DNStype dnstype,
+ struct dohentry *d);
+
+UNITTEST void de_init(struct dohentry *d);
+UNITTEST void de_cleanup(struct dohentry *d);
#endif
extern struct curl_trc_feat Curl_doh_trc;
diff --git a/libs/libcurl/src/dynbuf.c b/libs/libcurl/src/dynbuf.c
index 78ef1039df..f50fb055c1 100644
--- a/libs/libcurl/src/dynbuf.c
+++ b/libs/libcurl/src/dynbuf.c
@@ -51,7 +51,7 @@ void Curl_dyn_init(struct dynbuf *s, size_t toobig)
}
/*
- * free the buffer and re-init the necessary fields. It doesn't touch the
+ * free the buffer and re-init the necessary fields. It does not touch the
* 'init' field and thus this buffer can be reused to add data to again.
*/
void Curl_dyn_free(struct dynbuf *s)
@@ -71,7 +71,7 @@ static CURLcode dyn_nappend(struct dynbuf *s,
size_t a = s->allc;
size_t fit = len + indx + 1; /* new string + old string + zero byte */
- /* try to detect if there's rubbish in the struct */
+ /* try to detect if there is rubbish in the struct */
DEBUGASSERT(s->init == DYNINIT);
DEBUGASSERT(s->toobig);
DEBUGASSERT(indx < s->toobig);
diff --git a/libs/libcurl/src/dynhds.c b/libs/libcurl/src/dynhds.c
index 465c09baca..a5e69d868a 100644
--- a/libs/libcurl/src/dynhds.c
+++ b/libs/libcurl/src/dynhds.c
@@ -275,7 +275,7 @@ CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line)
return Curl_dynhds_h1_add_line(dynhds, line, line? strlen(line) : 0);
}
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
/* used by unit2602.c */
bool Curl_dynhds_contains(struct dynhds *dynhds,
diff --git a/libs/libcurl/src/dynhds.h b/libs/libcurl/src/dynhds.h
index 215087c6f4..f2af6668d6 100644
--- a/libs/libcurl/src/dynhds.h
+++ b/libs/libcurl/src/dynhds.h
@@ -95,6 +95,9 @@ struct dynhds_entry *Curl_dynhds_get(struct dynhds *dynhds,
const char *name, size_t namelen);
struct dynhds_entry *Curl_dynhds_cget(struct dynhds *dynhds, const char *name);
+#ifdef UNITTESTS
+/* used by unit2602.c */
+
/**
* Return TRUE iff one or more headers with the given name exist.
*/
@@ -116,20 +119,6 @@ size_t Curl_dynhds_count_name(struct dynhds *dynhds,
size_t Curl_dynhds_ccount_name(struct dynhds *dynhds, const char *name);
/**
- * Add a header, name + value, to `dynhds` at the end. Does *not*
- * check for duplicate names.
- */
-CURLcode Curl_dynhds_add(struct dynhds *dynhds,
- const char *name, size_t namelen,
- const char *value, size_t valuelen);
-
-/**
- * Add a header, c-string name + value, to `dynhds` at the end.
- */
-CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
- const char *name, const char *value);
-
-/**
* Remove all entries with the given name.
* Returns number of entries removed.
*/
@@ -146,19 +135,34 @@ size_t Curl_dynhds_cremove(struct dynhds *dynhds, const char *name);
CURLcode Curl_dynhds_set(struct dynhds *dynhds,
const char *name, size_t namelen,
const char *value, size_t valuelen);
+#endif
CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
const char *name, const char *value);
/**
- * Add a single header from a HTTP/1.1 formatted line at the end. Line
+ * Add a header, name + value, to `dynhds` at the end. Does *not*
+ * check for duplicate names.
+ */
+CURLcode Curl_dynhds_add(struct dynhds *dynhds,
+ const char *name, size_t namelen,
+ const char *value, size_t valuelen);
+
+/**
+ * Add a header, c-string name + value, to `dynhds` at the end.
+ */
+CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
+ const char *name, const char *value);
+
+/**
+ * Add a single header from an HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored.
*/
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line);
/**
- * Add a single header from a HTTP/1.1 formatted line at the end. Line
+ * Add a single header from an HTTP/1.1 formatted line at the end. Line
* may contain a delimiting \r\n or just \n. Any characters after
* that will be ignored.
*/
diff --git a/libs/libcurl/src/easy.c b/libs/libcurl/src/easy.c
index 8dfeea825f..6e5b91a76e 100644
--- a/libs/libcurl/src/easy.c
+++ b/libs/libcurl/src/easy.c
@@ -242,7 +242,7 @@ CURLcode curl_global_init_mem(long flags, curl_malloc_callback m,
global_init_lock();
if(initialized) {
- /* Already initialized, don't do it again, but bump the variable anyway to
+ /* Already initialized, do not do it again, but bump the variable anyway to
work like curl_global_init() and require the same amount of cleanup
calls. */
initialized++;
@@ -268,7 +268,8 @@ CURLcode curl_global_init_mem(long flags, curl_malloc_callback m,
/**
* curl_global_cleanup() globally cleanups curl, uses the value of
- * "easy_init_flags" to determine what needs to be cleaned up and what doesn't.
+ * "easy_init_flags" to determine what needs to be cleaned up and what does
+ * not.
*/
void curl_global_cleanup(void)
{
@@ -374,7 +375,7 @@ struct Curl_easy *curl_easy_init(void)
return data;
}
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
struct socketmonitor {
struct socketmonitor *next; /* the next node in the list or NULL */
@@ -579,7 +580,7 @@ static CURLcode wait_or_timeout(struct Curl_multi *multi, struct events *ev)
before = Curl_now();
/* wait for activity or timeout */
- pollrc = Curl_poll(fds, numfds, ev->ms);
+ pollrc = Curl_poll(fds, (unsigned int)numfds, ev->ms);
if(pollrc < 0)
return CURLE_UNRECOVERABLE_POLL;
@@ -627,7 +628,7 @@ static CURLcode wait_or_timeout(struct Curl_multi *multi, struct events *ev)
if(mcode)
return CURLE_URL_MALFORMAT;
- /* we don't really care about the "msgs_in_queue" value returned in the
+ /* we do not really care about the "msgs_in_queue" value returned in the
second argument */
msg = curl_multi_info_read(multi, &pollrc);
if(msg) {
@@ -655,8 +656,8 @@ static CURLcode easy_events(struct Curl_multi *multi)
return wait_or_timeout(multi, &evs);
}
-#else /* CURLDEBUG */
-/* when not built with debug, this function doesn't exist */
+#else /* DEBUGBUILD */
+/* when not built with debug, this function does not exist */
#define easy_events(x) CURLE_NOT_BUILT_IN
#endif
@@ -706,7 +707,7 @@ static CURLcode easy_transfer(struct Curl_multi *multi)
* easy handle, destroys the multi handle and returns the easy handle's return
* code.
*
- * REALITY: it can't just create and destroy the multi handle that easily. It
+ * REALITY: it cannot just create and destroy the multi handle that easily. It
* needs to keep it around since if this easy handle is used again by this
* function, the same multi handle must be reused so that the same pools and
* caches can be used.
@@ -768,7 +769,7 @@ static CURLcode easy_perform(struct Curl_easy *data, bool events)
/* run the transfer */
result = events ? easy_events(multi) : easy_transfer(multi);
- /* ignoring the return code isn't nice, but atm we can't really handle
+ /* ignoring the return code is not nice, but atm we cannot really handle
a failure here, room for future improvement! */
(void)curl_multi_remove_handle(multi, data);
@@ -788,7 +789,7 @@ CURLcode curl_easy_perform(struct Curl_easy *data)
return easy_perform(data, FALSE);
}
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
/*
* curl_easy_perform_ev() is the external interface that performs a blocking
* transfer using the event-based API internally.
@@ -1090,7 +1091,7 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action)
bool keep_changed, unpause_read, not_all_paused;
if(!GOOD_EASY_HANDLE(data) || !data->conn)
- /* crazy input, don't continue */
+ /* crazy input, do not continue */
return CURLE_BAD_FUNCTION_ARGUMENT;
if(Curl_is_in_callback(data))
@@ -1142,6 +1143,11 @@ CURLcode curl_easy_pause(struct Curl_easy *data, int action)
goto out;
}
+ if(!(k->keepon & KEEP_RECV_PAUSE) && Curl_cwriter_is_paused(data)) {
+ Curl_conn_ev_data_pause(data, FALSE);
+ result = Curl_cwriter_unpause(data);
+ }
+
out:
if(!result && !data->state.done && keep_changed)
/* This transfer may have been moved in or out of the bundle, update the
diff --git a/libs/libcurl/src/easygetopt.c b/libs/libcurl/src/easygetopt.c
index 40498b1fc4..b54f1db880 100644
--- a/libs/libcurl/src/easygetopt.c
+++ b/libs/libcurl/src/easygetopt.c
@@ -42,7 +42,7 @@ static struct curl_easyoption *lookup(const char *name, CURLoption id)
}
else {
if((o->id == id) && !(o->flags & CURLOT_FLAG_ALIAS))
- /* don't match alias options */
+ /* do not match alias options */
return o;
}
o++;
diff --git a/libs/libcurl/src/easyif.h b/libs/libcurl/src/easyif.h
index af2c1fb151..615b6f33a4 100644
--- a/libs/libcurl/src/easyif.h
+++ b/libs/libcurl/src/easyif.h
@@ -34,7 +34,7 @@ CURLcode Curl_senddata(struct Curl_easy *data, const void *buffer,
CURLcode Curl_connect_only_attach(struct Curl_easy *data);
#endif
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
CURL_EXTERN CURLcode curl_easy_perform_ev(struct Curl_easy *easy);
#endif
diff --git a/libs/libcurl/src/easyoptions.c b/libs/libcurl/src/easyoptions.c
index ad80e253d2..66c0ef13c3 100644
--- a/libs/libcurl/src/easyoptions.c
+++ b/libs/libcurl/src/easyoptions.c
@@ -328,6 +328,7 @@ struct curl_easyoption Curl_easyopts[] = {
CURLOT_LONG, 0},
{"TCP_FASTOPEN", CURLOPT_TCP_FASTOPEN, CURLOT_LONG, 0},
{"TCP_KEEPALIVE", CURLOPT_TCP_KEEPALIVE, CURLOT_LONG, 0},
+ {"TCP_KEEPCNT", CURLOPT_TCP_KEEPCNT, CURLOT_LONG, 0},
{"TCP_KEEPIDLE", CURLOPT_TCP_KEEPIDLE, CURLOT_LONG, 0},
{"TCP_KEEPINTVL", CURLOPT_TCP_KEEPINTVL, CURLOT_LONG, 0},
{"TCP_NODELAY", CURLOPT_TCP_NODELAY, CURLOT_LONG, 0},
@@ -376,6 +377,6 @@ struct curl_easyoption Curl_easyopts[] = {
*/
int Curl_easyopts_check(void)
{
- return ((CURLOPT_LASTENTRY%10000) != (325 + 1));
+ return ((CURLOPT_LASTENTRY%10000) != (326 + 1));
}
#endif
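
The bumped constant in Curl_easyopts_check() pairs with the new TCP_KEEPCNT table entry: in the public curl.h scheme each option value is a type offset in multiples of 10000 plus a sequence number, CURLOPT_LASTENTRY follows the last declared option, and the modulo strips the type offset, so the remainder has to track the highest sequence number plus one (assuming CURLOPT_TCP_KEEPCNT is option number 326, the check moves from 325 + 1 to 326 + 1). A quick way to print that quantity, assuming curl 8.9.0 or newer headers are installed:

    /* Sketch: print the same quantity Curl_easyopts_check() compares. */
    #include <curl/curl.h>
    #include <stdio.h>

    int main(void)
    {
      printf("%d\n", (int)(CURLOPT_LASTENTRY % 10000));  /* expect 327 */
      return 0;
    }
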
diff --git a/libs/libcurl/src/escape.c b/libs/libcurl/src/escape.c
index a64137c8ae..b154c584f6 100644
--- a/libs/libcurl/src/escape.c
+++ b/libs/libcurl/src/escape.c
@@ -70,7 +70,8 @@ char *curl_easy_escape(struct Curl_easy *data, const char *string,
return strdup("");
while(length--) {
- unsigned char in = *string++; /* treat the characters unsigned */
+ /* treat the characters unsigned */
+ unsigned char in = (unsigned char)*string++;
if(ISUNRESERVED(in)) {
/* append this */
@@ -137,7 +138,7 @@ CURLcode Curl_urldecode(const char *string, size_t length,
*ostring = ns;
while(alloc) {
- unsigned char in = *string;
+ unsigned char in = (unsigned char)*string;
if(('%' == in) && (alloc > 2) &&
ISXDIGIT(string[1]) && ISXDIGIT(string[2])) {
/* this is two hexadecimal digits following a '%' */
@@ -157,7 +158,7 @@ CURLcode Curl_urldecode(const char *string, size_t length,
return CURLE_URL_MALFORMAT;
}
- *ns++ = in;
+ *ns++ = (char)in;
}
*ns = 0; /* terminate it */
@@ -222,8 +223,8 @@ void Curl_hexencode(const unsigned char *src, size_t len, /* input length */
while(len-- && (olen >= 3)) {
/* clang-tidy warns on this line without this comment: */
/* NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult) */
- *out++ = hex[(*src & 0xF0)>>4];
- *out++ = hex[*src & 0x0F];
+ *out++ = (unsigned char)hex[(*src & 0xF0)>>4];
+ *out++ = (unsigned char)hex[*src & 0x0F];
++src;
olen -= 2;
}
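
The casts above quiet conversion warnings in Curl_hexencode(); the transformation itself stays two hex digits per input byte plus a terminating zero, with the high nibble emitted first. A stand-alone sketch of the same encoding (not the curl sources):

    /* Sketch: hex-encode a byte buffer; needs olen >= 2 * len + 1. */
    #include <stdio.h>

    static void hexencode(const unsigned char *src, size_t len,
                          char *out, size_t olen)
    {
      static const char hex[] = "0123456789abcdef";
      while(len-- && olen >= 3) {
        *out++ = hex[(*src & 0xF0) >> 4];   /* high nibble first */
        *out++ = hex[*src & 0x0F];
        ++src;
        olen -= 2;
      }
      *out = 0;                             /* zero terminate */
    }

    int main(void)
    {
      const unsigned char data[] = {0xDE, 0xAD, 0xBE, 0xEF};
      char buf[2 * sizeof(data) + 1];
      hexencode(data, sizeof(data), buf, sizeof(buf));
      printf("%s\n", buf);                  /* deadbeef */
      return 0;
    }
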
diff --git a/libs/libcurl/src/file.c b/libs/libcurl/src/file.c
index fa71856471..0d3040d092 100644
--- a/libs/libcurl/src/file.c
+++ b/libs/libcurl/src/file.c
@@ -147,7 +147,7 @@ static CURLcode file_setup_connection(struct Curl_easy *data,
/*
* file_connect() gets called from Curl_protocol_connect() to allow us to
- * do protocol-specific actions at connect-time. We emulate a
+ * do protocol-specific actions at connect-time. We emulate a
* connect-then-transfer protocol and "connect" to the file here
*/
static CURLcode file_connect(struct Curl_easy *data, bool *done)
@@ -177,18 +177,18 @@ static CURLcode file_connect(struct Curl_easy *data, bool *done)
return result;
#ifdef DOS_FILESYSTEM
- /* If the first character is a slash, and there's
+ /* If the first character is a slash, and there is
something that looks like a drive at the beginning of
- the path, skip the slash. If we remove the initial
+ the path, skip the slash. If we remove the initial
slash in all cases, paths without drive letters end up
- relative to the current directory which isn't how
+ relative to the current directory which is not how
browsers work.
Some browsers accept | instead of : as the drive letter
separator, so we do too.
On other platforms, we need the slash to indicate an
- absolute pathname. On Windows, absolute paths start
+ absolute pathname. On Windows, absolute paths start
with a drive letter.
*/
actual_path = real_path;
@@ -308,7 +308,7 @@ static CURLcode file_upload(struct Curl_easy *data)
bool eos = FALSE;
/*
- * Since FILE: doesn't do the full init, we need to provide some extra
+ * Since FILE: does not do the full init, we need to provide some extra
* assignments here.
*/
@@ -331,7 +331,7 @@ static CURLcode file_upload(struct Curl_easy *data)
fd = open(file->path, mode, data->set.new_file_perms);
if(fd < 0) {
- failf(data, "Can't open %s for writing", file->path);
+ failf(data, "cannot open %s for writing", file->path);
return CURLE_WRITE_ERROR;
}
@@ -343,7 +343,7 @@ static CURLcode file_upload(struct Curl_easy *data)
if(data->state.resume_from < 0) {
if(fstat(fd, &file_stat)) {
close(fd);
- failf(data, "Can't get the size of %s", file->path);
+ failf(data, "cannot get the size of %s", file->path);
return CURLE_WRITE_ERROR;
}
data->state.resume_from = (curl_off_t)file_stat.st_size;
@@ -413,13 +413,13 @@ out:
* file_do() is the protocol-specific function for the do-phase, separated
* from the connect-phase above. Other protocols merely setup the transfer in
* the do-phase, to have it done in the main transfer loop but since some
- * platforms we support don't allow select()ing etc on file handles (as
+ * platforms we support do not allow select()ing etc on file handles (as
* opposed to sockets) we instead perform the whole do-operation in this
* function.
*/
static CURLcode file_do(struct Curl_easy *data, bool *done)
{
- /* This implementation ignores the host name in conformance with
+ /* This implementation ignores the hostname in conformance with
RFC 1738. Only local files (reachable via the standard file system)
are supported. This means that files on remotely mounted directories
(via NFS, Samba, NT sharing) can be accessed through a file:// URL
@@ -465,17 +465,18 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
const struct tm *tm = &buffer;
char header[80];
int headerlen;
- char accept_ranges[24]= { "Accept-ranges: bytes\r\n" };
+ static const char accept_ranges[]= { "Accept-ranges: bytes\r\n" };
if(expected_size >= 0) {
- headerlen = msnprintf(header, sizeof(header),
- "Content-Length: %" CURL_FORMAT_CURL_OFF_T "\r\n",
- expected_size);
+ headerlen =
+ msnprintf(header, sizeof(header),
+ "Content-Length: %" CURL_FORMAT_CURL_OFF_T "\r\n",
+ expected_size);
result = Curl_client_write(data, CLIENTWRITE_HEADER, header, headerlen);
if(result)
return result;
result = Curl_client_write(data, CLIENTWRITE_HEADER,
- accept_ranges, strlen(accept_ranges));
+ accept_ranges, sizeof(accept_ranges) - 1);
if(result != CURLE_OK)
return result;
}
@@ -486,23 +487,26 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
return result;
/* format: "Tue, 15 Nov 1994 12:45:26 GMT" */
- headerlen = msnprintf(header, sizeof(header),
- "Last-Modified: %s, %02d %s %4d %02d:%02d:%02d GMT\r\n%s",
- Curl_wkday[tm->tm_wday?tm->tm_wday-1:6],
- tm->tm_mday,
- Curl_month[tm->tm_mon],
- tm->tm_year + 1900,
- tm->tm_hour,
- tm->tm_min,
- tm->tm_sec,
- data->req.no_body ? "": "\r\n");
+ headerlen =
+ msnprintf(header, sizeof(header),
+ "Last-Modified: %s, %02d %s %4d %02d:%02d:%02d GMT\r\n",
+ Curl_wkday[tm->tm_wday?tm->tm_wday-1:6],
+ tm->tm_mday,
+ Curl_month[tm->tm_mon],
+ tm->tm_year + 1900,
+ tm->tm_hour,
+ tm->tm_min,
+ tm->tm_sec);
result = Curl_client_write(data, CLIENTWRITE_HEADER, header, headerlen);
+ if(!result)
+ /* end of headers */
+ result = Curl_client_write(data, CLIENTWRITE_HEADER, "\r\n", 2);
if(result)
return result;
/* set the file size to make it available post transfer */
Curl_pgrsSetDownloadSize(data, expected_size);
if(data->req.no_body)
- return result;
+ return CURLE_OK;
}
/* Check whether file range has been specified */
@@ -514,7 +518,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
* of the stream if the filesize could be determined */
if(data->state.resume_from < 0) {
if(!fstated) {
- failf(data, "Can't get the size of file.");
+ failf(data, "cannot get the size of file.");
return CURLE_READ_ERROR;
}
data->state.resume_from += (curl_off_t)statbuf.st_size;
@@ -522,7 +526,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
if(data->state.resume_from > 0) {
/* We check explicitly if we have a start offset, because
- * expected_size may be -1 if we don't know how large the file is,
+ * expected_size may be -1 if we do not know how large the file is,
* in which case we should not adjust it. */
if(data->state.resume_from <= expected_size)
expected_size -= data->state.resume_from;
@@ -566,7 +570,7 @@ static CURLcode file_do(struct Curl_easy *data, bool *done)
if(!S_ISDIR(statbuf.st_mode)) {
while(!result) {
ssize_t nread;
- /* Don't fill a whole buffer if we want less than all data */
+ /* Do not fill a whole buffer if we want less than all data */
size_t bytestoread;
if(size_known) {
diff --git a/libs/libcurl/src/fopen.c b/libs/libcurl/src/fopen.c
index 4cbd03d1a6..110345a067 100644
--- a/libs/libcurl/src/fopen.c
+++ b/libs/libcurl/src/fopen.c
@@ -42,12 +42,12 @@
/*
The dirslash() function breaks a null-terminated pathname string into
directory and filename components then returns the directory component up
- to, *AND INCLUDING*, a final '/'. If there is no directory in the path,
+ to, *AND INCLUDING*, a final '/'. If there is no directory in the path,
this instead returns a "" string.
This function returns a pointer to malloc'ed memory.
- The input path to this function is expected to have a file name part.
+ The input path to this function is expected to have a filename part.
*/
#ifdef _WIN32
@@ -88,7 +88,7 @@ static char *dirslash(const char *path)
* Curl_fopen() opens a file for writing with a temp name, to be renamed
* to the final name when completed. If there is an existing file using this
* name at the time of the open, this function will clone the mode from that
- * file. if 'tempname' is non-NULL, it needs a rename after the file is
+ * file. If 'tempname' is non-NULL, it needs a rename after the file is
* written.
*/
CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
@@ -117,7 +117,7 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
dir = dirslash(filename);
if(dir) {
- /* The temp file name should not end up too long for the target file
+ /* The temp filename should not end up too long for the target file
system */
tempstore = aprintf("%s%s.tmp", dir, randbuf);
free(dir);
diff --git a/libs/libcurl/src/formdata.c b/libs/libcurl/src/formdata.c
index 1fd05fe186..6125d3c8bd 100644
--- a/libs/libcurl/src/formdata.c
+++ b/libs/libcurl/src/formdata.c
@@ -216,8 +216,8 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
struct curl_forms *forms = NULL;
char *array_value = NULL; /* value read from an array */
- /* This is a state variable, that if TRUE means that we're parsing an
- array that we got passed to us. If FALSE we're parsing the input
+ /* This is a state variable, that if TRUE means that we are parsing an
+ array that we got passed to us. If FALSE we are parsing the input
va_list arguments. */
bool array_state = FALSE;
@@ -260,7 +260,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
switch(option) {
case CURLFORM_ARRAY:
if(array_state)
- /* we don't support an array from within an array */
+ /* we do not support an array from within an array */
return_value = CURL_FORMADD_ILLEGAL_ARRAY;
else {
forms = va_arg(params, struct curl_forms *);
@@ -327,7 +327,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
array_state?(curl_off_t)(size_t)array_value:va_arg(params, curl_off_t);
break;
- /* Get contents from a given file name */
+ /* Get contents from a given filename */
case CURLFORM_FILECONTENT:
if(current_form->flags & (HTTPPOST_PTRCONTENTS|HTTPPOST_READFILE))
return_value = CURL_FORMADD_OPTION_TWICE;
@@ -429,7 +429,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
array_state?array_value:va_arg(params, char *);
if(userp) {
current_form->userp = userp;
- current_form->value = userp; /* this isn't strictly true but we
+ current_form->value = userp; /* this is not strictly true but we
derive a value from this later on
and we need this non-NULL to be
accepted as a fine form part */
@@ -599,7 +599,7 @@ CURLFORMcode FormAdd(struct curl_httppost **httppost,
}
if(!(form->flags & HTTPPOST_PTRNAME) &&
(form == first_form) ) {
- /* Note that there's small risk that form->name is NULL here if the
+ /* Note that there is a small risk that form->name is NULL here if the
app passed in a bad combo, so we better check for that first. */
if(form->name) {
/* copy name (without strdup; possibly not null-terminated) */
@@ -764,7 +764,7 @@ void curl_formfree(struct curl_httppost *form)
)
free(form->contents); /* free the contents */
free(form->contenttype); /* free the content type */
- free(form->showfilename); /* free the faked file name */
+ free(form->showfilename); /* free the faked filename */
free(form); /* free the struct */
form = next;
} while(form); /* continue */
@@ -880,10 +880,10 @@ CURLcode Curl_getformdata(struct Curl_easy *data,
if(post->flags & (HTTPPOST_FILENAME | HTTPPOST_READFILE)) {
if(!strcmp(file->contents, "-")) {
- /* There are a few cases where the code below won't work; in
+ /* There are a few cases where the code below will not work; in
particular, freopen(stdin) by the caller is not guaranteed
to result as expected. This feature has been kept for backward
- compatibility: use of "-" pseudo file name should be avoided. */
+ compatibility: use of "-" pseudo filename should be avoided. */
result = curl_mime_data_cb(part, (curl_off_t) -1,
(curl_read_callback) fread,
fseeko_wrapper,
@@ -915,7 +915,7 @@ CURLcode Curl_getformdata(struct Curl_easy *data,
}
}
- /* Set fake file name. */
+ /* Set fake filename. */
if(!result && post->showfilename)
if(post->more || (post->flags & (HTTPPOST_FILENAME | HTTPPOST_BUFFER |
HTTPPOST_CALLBACK)))
diff --git a/libs/libcurl/src/formdata.h b/libs/libcurl/src/formdata.h
index a8db4cba90..447b4173db 100644
--- a/libs/libcurl/src/formdata.h
+++ b/libs/libcurl/src/formdata.h
@@ -38,8 +38,8 @@ struct FormInfo {
long flags;
char *buffer; /* pointer to existing buffer used for file upload */
size_t bufferlength;
- char *showfilename; /* The file name to show. If not set, the actual
- file name will be used */
+ char *showfilename; /* The filename to show. If not set, the actual
+ filename will be used */
char *userp; /* pointer for the read callback */
struct curl_slist *contentheader;
struct FormInfo *more;
diff --git a/libs/libcurl/src/ftp.c b/libs/libcurl/src/ftp.c
index 6b1f2a31f6..17fa5a6c39 100644
--- a/libs/libcurl/src/ftp.c
+++ b/libs/libcurl/src/ftp.c
@@ -290,12 +290,11 @@ const struct Curl_handler Curl_handler_ftps = {
};
#endif
-static void close_secondarysocket(struct Curl_easy *data,
- struct connectdata *conn)
+static void close_secondarysocket(struct Curl_easy *data)
{
CURL_TRC_FTP(data, "[%s] closing DATA connection", FTP_DSTATE(data));
Curl_conn_close(data, SECONDARYSOCKET);
- Curl_conn_cf_discard_all(data, conn, SECONDARYSOCKET);
+ Curl_conn_cf_discard_all(data, data->conn, SECONDARYSOCKET);
}
/*
@@ -475,7 +474,7 @@ static CURLcode AcceptServerConnect(struct Curl_easy *data)
Curl_set_in_callback(data, false);
if(error) {
- close_secondarysocket(data, conn);
+ close_secondarysocket(data);
return CURLE_ABORTED_BY_CALLBACK;
}
}
@@ -649,19 +648,19 @@ static CURLcode InitiateTransfer(struct Curl_easy *data)
return result;
if(conn->proto.ftpc.state_saved == FTP_STOR) {
- /* When we know we're uploading a specified file, we can get the file
+ /* When we know we are uploading a specified file, we can get the file
size prior to the actual upload. */
Curl_pgrsSetUploadSize(data, data->state.infilesize);
/* set the SO_SNDBUF for the secondary socket for those who need it */
- Curl_sndbufset(conn->sock[SECONDARYSOCKET]);
+ Curl_sndbuf_init(conn->sock[SECONDARYSOCKET]);
- Curl_xfer_setup(data, -1, -1, FALSE, SECONDARYSOCKET);
+ Curl_xfer_setup2(data, CURL_XFER_SEND, -1, TRUE);
}
else {
/* FTP download: */
- Curl_xfer_setup(data, SECONDARYSOCKET,
- conn->proto.ftpc.retr_size_saved, FALSE, -1);
+ Curl_xfer_setup2(data, CURL_XFER_RECV,
+ conn->proto.ftpc.retr_size_saved, TRUE);
}
conn->proto.ftpc.pp.pending_resp = TRUE; /* expect server response */
@@ -674,7 +673,7 @@ static CURLcode InitiateTransfer(struct Curl_easy *data)
*
* AllowServerConnect()
*
- * When we've issue the PORT command, we have told the server to connect to
+ * When we have issued the PORT command, we have told the server to connect to
* us. This function checks whether data connection is established if so it is
* accepted.
*
@@ -806,7 +805,7 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
{
/*
* We cannot read just one byte per read() and then go back to select() as
- * the OpenSSL read() doesn't grok that properly.
+ * the OpenSSL read() does not grok that properly.
*
* Alas, read as much as possible, split up into lines, use the ending
* line in a response or continue reading. */
@@ -849,16 +848,16 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
*
* A caution here is that the ftp_readresp() function has a cache that may
* contain pieces of a response from the previous invoke and we need to
- * make sure we don't just wait for input while there is unhandled data in
+ * make sure we do not just wait for input while there is unhandled data in
* that cache. But also, if the cache is there, we call ftp_readresp() and
- * the cache wasn't good enough to continue we must not just busy-loop
+ * the cache was not good enough to continue we must not just busy-loop
* around this function.
*
*/
if(Curl_dyn_len(&pp->recvbuf) && (cache_skip < 2)) {
/*
- * There's a cache left since before. We then skipping the wait for
+ * There is a cache left since before. We then skip the wait for
* socket action, unless this is the same cache like the previous round
* as then the cache was deemed not enough to act on and we then need to
* wait for more data anyway.
@@ -895,7 +894,7 @@ CURLcode Curl_GetFTPResponse(struct Curl_easy *data,
*nreadp += nread;
- } /* while there's buffer left and loop is requested */
+ } /* while there is buffer left and loop is requested */
pp->pending_resp = FALSE;
@@ -948,7 +947,7 @@ static int ftp_domore_getsock(struct Curl_easy *data,
CURL_TRC_FTP(data, "[%s] ftp_domore_getsock()", FTP_DSTATE(data));
if(FTP_STOP == ftpc->state) {
- /* if stopped and still in this state, then we're also waiting for a
+ /* if stopped and still in this state, then we are also waiting for a
connect on the secondary connection */
DEBUGASSERT(conn->sock[SECONDARYSOCKET] != CURL_SOCKET_BAD ||
(conn->cfilter[SECONDARYSOCKET] &&
@@ -1136,13 +1135,13 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
#endif
ipstr, hbuf, sizeof(hbuf))) {
case IF2IP_NOT_FOUND:
- /* not an interface, use the given string as host name instead */
+ /* not an interface, use the given string as hostname instead */
host = ipstr;
break;
case IF2IP_AF_NOT_SUPPORTED:
goto out;
case IF2IP_FOUND:
- host = hbuf; /* use the hbuf for host name */
+ host = hbuf; /* use the hbuf for hostname */
break;
}
}
@@ -1153,7 +1152,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
if(!host) {
const char *r;
- /* not an interface and not a host name, get default by extracting
+ /* not an interface and not a hostname, get default by extracting
the IP from the control connection */
sslen = sizeof(ss);
if(getsockname(conn->sock[FIRSTSOCKET], sa, &sslen)) {
@@ -1174,7 +1173,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
if(!r) {
goto out;
}
- host = hbuf; /* use this host name */
+ host = hbuf; /* use this hostname */
possibly_non_local = FALSE; /* we know it is local now */
}
@@ -1232,7 +1231,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
/* It failed. */
error = SOCKERRNO;
if(possibly_non_local && (error == EADDRNOTAVAIL)) {
- /* The requested bind address is not local. Use the address used for
+ /* The requested bind address is not local. Use the address used for
* the control connection instead and restart the port loop
*/
infof(data, "bind(port=%hu) on non-local address failed: %s", port,
@@ -1245,7 +1244,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
goto out;
}
port = port_min;
- possibly_non_local = FALSE; /* don't try this again */
+ possibly_non_local = FALSE; /* do not try this again */
continue;
}
if(error != EADDRINUSE && error != EACCES) {
@@ -1355,7 +1354,7 @@ static CURLcode ftp_state_use_port(struct Curl_easy *data,
char *dest = target;
/* translate x.x.x.x to x,x,x,x */
- while(source && *source) {
+ while(*source) {
if(*source == '.')
*dest = ',';
else
@@ -1444,7 +1443,7 @@ static CURLcode ftp_state_prepare_transfer(struct Curl_easy *data)
struct connectdata *conn = data->conn;
if(ftp->transfer != PPTRANSFER_BODY) {
- /* doesn't transfer any data */
+ /* does not transfer any data */
/* still possibly do PRE QUOTE jobs */
ftp_state(data, FTP_RETR_PREQUOTE);
@@ -1512,7 +1511,7 @@ static CURLcode ftp_state_size(struct Curl_easy *data,
if((ftp->transfer == PPTRANSFER_INFO) && ftpc->file) {
/* if a "head"-like request is being made (on a file) */
- /* we know ftpc->file is a valid pointer to a file name */
+ /* we know ftpc->file is a valid pointer to a filename */
result = Curl_pp_sendf(data, &ftpc->pp, "SIZE %s", ftpc->file);
if(!result)
ftp_state(data, FTP_SIZE);
@@ -1590,13 +1589,13 @@ static CURLcode ftp_state_list(struct Curl_easy *data)
static CURLcode ftp_state_retr_prequote(struct Curl_easy *data)
{
- /* We've sent the TYPE, now we must send the list of prequote strings */
+ /* We have sent the TYPE, now we must send the list of prequote strings */
return ftp_state_quote(data, TRUE, FTP_RETR_PREQUOTE);
}
static CURLcode ftp_state_stor_prequote(struct Curl_easy *data)
{
- /* We've sent the TYPE, now we must send the list of prequote strings */
+ /* We have sent the TYPE, now we must send the list of prequote strings */
return ftp_state_quote(data, TRUE, FTP_STOR_PREQUOTE);
}
@@ -1608,7 +1607,7 @@ static CURLcode ftp_state_type(struct Curl_easy *data)
struct ftp_conn *ftpc = &conn->proto.ftpc;
/* If we have selected NOBODY and HEADER, it means that we only want file
- information. Which in FTP can't be much more than the file size and
+ information. Which in FTP cannot be much more than the file size and
date. */
if(data->req.no_body && ftpc->file &&
ftp_need_type(conn, data->state.prefer_ascii)) {
@@ -1668,13 +1667,13 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
if((data->state.resume_from && !sizechecked) ||
((data->state.resume_from > 0) && sizechecked)) {
- /* we're about to continue the uploading of a file */
+ /* we are about to continue the uploading of a file */
/* 1. get already existing file's size. We use the SIZE command for this
which may not exist in the server! The SIZE command is not in
RFC959. */
/* 2. This used to set REST. But since we can do append, we
- don't another ftp command. We just skip the source file
+ do not need another ftp command. We just skip the source file
offset and then we APPEND the rest on the file instead */
/* 3. pass file-size number of bytes in the source file */
@@ -1707,7 +1706,7 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
failf(data, "Could not seek stream");
return CURLE_FTP_COULDNT_USE_REST;
}
- /* seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ /* seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
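
When the seek callback reports CURL_SEEKFUNC_CANTSEEK, the upload resume falls back to reading and discarding input in scratch-buffer chunks until the offset has been consumed. A standalone sketch of that skip-by-reading pattern, using a plain FILE* and fread() instead of libcurl's read callback (helper name hypothetical):

#include <stdio.h>

/* read and throw away 'offset' bytes from 'in'; returns 0 on success,
   -1 if the input ends before the offset is reached */
static int skip_by_reading(FILE *in, long offset)
{
  char scratch[4 * 1024];
  while(offset > 0) {
    size_t chunk = ((size_t)offset < sizeof(scratch)) ?
      (size_t)offset : sizeof(scratch);
    size_t nread = fread(scratch, 1, chunk, in);
    if(!nread)
      return -1; /* EOF or error before reaching the offset */
    offset -= (long)nread;
  }
  return 0;
}

int main(void)
{
  return skip_by_reading(stdin, 1024) ? 1 : 0;
}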
@@ -1736,17 +1735,17 @@ static CURLcode ftp_state_ul_setup(struct Curl_easy *data,
infof(data, "File already completely uploaded");
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
- /* Set ->transfer so that we won't get any error in
- * ftp_done() because we didn't transfer anything! */
+ /* Set ->transfer so that we will not get any error in
+ * ftp_done() because we did not transfer anything! */
ftp->transfer = PPTRANSFER_NONE;
ftp_state(data, FTP_STOP);
return CURLE_OK;
}
}
- /* we've passed, proceed as normal */
+ /* we have passed, proceed as normal */
} /* resume_from */
result = Curl_pp_sendf(data, &ftpc->pp, append?"APPE %s":"STOR %s",
@@ -1835,16 +1834,16 @@ static CURLcode ftp_state_quote(struct Curl_easy *data,
}
else {
if(data->set.ignorecl || data->state.prefer_ascii) {
- /* 'ignorecl' is used to support download of growing files. It
+ /* 'ignorecl' is used to support download of growing files. It
prevents the state machine from requesting the file size from
- the server. With an unknown file size the download continues
+ the server. With an unknown file size the download continues
until the server terminates it, otherwise the client stops if
- the received byte count exceeds the reported file size. Set
+ the received byte count exceeds the reported file size. Set
option CURLOPT_IGNORE_CONTENT_LENGTH to 1 to enable this
behavior.
In addition: asking for the size for 'TYPE A' transfers is not
- constructive since servers don't report the converted size. So
+ constructive since servers do not report the converted size. So
skip it.
*/
result = Curl_pp_sendf(data, &ftpc->pp, "RETR %s", ftpc->file);
@@ -1882,7 +1881,7 @@ static CURLcode ftp_epsv_disable(struct Curl_easy *data,
&& !(conn->bits.tunnel_proxy || conn->bits.socksproxy)
#endif
) {
- /* We can't disable EPSV when doing IPv6, so this is instead a fail */
+ /* We cannot disable EPSV when doing IPv6, so this is instead a fail */
failf(data, "Failed EPSV attempt, exiting");
return CURLE_WEIRD_SERVER_REPLY;
}
@@ -1907,7 +1906,7 @@ static CURLcode ftp_epsv_disable(struct Curl_easy *data,
static char *control_address(struct connectdata *conn)
{
/* Returns the control connection IP address.
- If a proxy tunnel is used, returns the original host name instead, because
+ If a proxy tunnel is used, returns the original hostname instead, because
the effective control connection address is the proxy address,
not the ftp host. */
#ifndef CURL_DISABLE_PROXY
@@ -2046,7 +2045,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
if(conn->bits.proxy) {
/*
* This connection uses a proxy and we need to connect to the proxy again
- * here. We don't want to rely on a former host lookup that might've
+ * here. We do not want to rely on a former host lookup that might've
* expired now, instead we remake the lookup here and now!
*/
const char * const host_name = conn->bits.socksproxy ?
@@ -2061,7 +2060,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
connectport = (unsigned short)conn->primary.remote_port;
if(!addr) {
- failf(data, "Can't resolve proxy host %s:%hu", host_name, connectport);
+ failf(data, "cannot resolve proxy host %s:%hu", host_name, connectport);
return CURLE_COULDNT_RESOLVE_PROXY;
}
}
@@ -2088,7 +2087,8 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
connectport = ftpc->newport; /* we connect to the remote port */
if(!addr) {
- failf(data, "Can't resolve new host %s:%hu", ftpc->newhost, connectport);
+ failf(data, "cannot resolve new host %s:%hu",
+ ftpc->newhost, connectport);
return CURLE_FTP_CANT_GET_HOST;
}
}
@@ -2098,7 +2098,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
CURL_CF_SSL_ENABLE : CURL_CF_SSL_DISABLE);
if(result) {
- Curl_resolv_unlock(data, addr); /* we're done using this address */
+ Curl_resolv_unlock(data, addr); /* we are done using this address */
if(ftpc->count1 == 0 && ftpcode == 229)
return ftp_epsv_disable(data, conn);
@@ -2116,7 +2116,7 @@ static CURLcode ftp_state_pasv_resp(struct Curl_easy *data,
/* this just dumps information about this second connection */
ftp_pasv_verbose(data, addr->addr, ftpc->newhost, connectport);
- Curl_resolv_unlock(data, addr); /* we're done using this address */
+ Curl_resolv_unlock(data, addr); /* we are done using this address */
Curl_safefree(conn->secondaryhostname);
conn->secondary_port = ftpc->newport;
@@ -2204,7 +2204,7 @@ static CURLcode client_write_header(struct Curl_easy *data,
* call to Curl_client_write() so it does the right thing.
*
* Notice that we cannot enable this flag for FTP in general,
- * as an FTP transfer might involve a HTTP proxy connection and
+ * as an FTP transfer might involve an HTTP proxy connection and
* headers from CONNECT should not automatically be part of the
* output. */
CURLcode result;
@@ -2371,17 +2371,17 @@ static CURLcode ftp_state_retr(struct Curl_easy *data,
/* We always (attempt to) get the size of downloads, so it is done before
this even when not doing resumes. */
if(filesize == -1) {
- infof(data, "ftp server doesn't support SIZE");
- /* We couldn't get the size and therefore we can't know if there really
+ infof(data, "ftp server does not support SIZE");
+ /* We could not get the size and therefore we cannot know if there really
is a part of the file left to get, although the server will just
- close the connection when we start the connection so it won't cause
+ close the connection when we start the connection so it will not cause
us any harm, just not make us exit as nicely. */
}
else {
/* We got a file size report, so we check that there actually is a
part of the file left to get, or else we go home. */
if(data->state.resume_from< 0) {
- /* We're supposed to download the last abs(from) bytes */
+ /* We are supposed to download the last abs(from) bytes */
if(filesize < -data->state.resume_from) {
failf(data, "Offset (%" CURL_FORMAT_CURL_OFF_T
") was beyond file size (%" CURL_FORMAT_CURL_OFF_T ")",
@@ -2407,11 +2407,11 @@ static CURLcode ftp_state_retr(struct Curl_easy *data,
if(ftp->downloadsize == 0) {
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
infof(data, "File already completely downloaded");
- /* Set ->transfer so that we won't get any error in ftp_done()
- * because we didn't transfer the any file */
+ /* Set ->transfer so that we will not get any error in ftp_done()
+ * because we did not transfer any file */
ftp->transfer = PPTRANSFER_NONE;
ftp_state(data, FTP_STOP);
return CURLE_OK;
@@ -2619,7 +2619,7 @@ static CURLcode ftp_state_get_resp(struct Curl_easy *data,
!data->set.ignorecl &&
(ftp->downloadsize < 1)) {
/*
- * It seems directory listings either don't show the size or very
+ * It seems directory listings either do not show the size or very
* often uses size 0 anyway. ASCII transfers may very well turn out
* that the transferred amount of data is not the same as this line
* tells, why using this number in those cases only confuses us.
@@ -2690,7 +2690,7 @@ static CURLcode ftp_state_get_resp(struct Curl_easy *data,
else {
if((instate == FTP_LIST) && (ftpcode == 450)) {
/* simply no matching files in the dir listing */
- ftp->transfer = PPTRANSFER_NONE; /* don't download anything */
+ ftp->transfer = PPTRANSFER_NONE; /* do not download anything */
ftp_state(data, FTP_STOP); /* this phase is over */
}
else {
@@ -2777,7 +2777,7 @@ static CURLcode ftp_state_user_resp(struct Curl_easy *data,
if(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER] &&
!ftpc->ftp_trying_alternative) {
- /* Ok, USER failed. Let's try the supplied command. */
+ /* Ok, USER failed. Let's try the supplied command. */
result =
Curl_pp_sendf(data, &ftpc->pp, "%s",
data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]);
@@ -2863,7 +2863,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
#endif
if(data->set.use_ssl && !conn->bits.ftp_use_control_ssl) {
- /* We don't have a SSL/TLS control connection yet, but FTPS is
+ /* We do not have a SSL/TLS control connection yet, but FTPS is
requested. Try a FTPS connection now */
ftpc->count3 = 0;
@@ -2880,7 +2880,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
default:
failf(data, "unsupported parameter to CURLOPT_FTPSSLAUTH: %d",
(int)data->set.ftpsslauth);
- return CURLE_UNKNOWN_OPTION; /* we don't know what to do */
+ return CURLE_UNKNOWN_OPTION; /* we do not know what to do */
}
result = Curl_pp_sendf(data, &ftpc->pp, "AUTH %s",
ftpauth[ftpc->count1]);
@@ -2980,7 +2980,13 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
case FTP_CCC:
if(ftpcode < 500) {
/* First shut down the SSL layer (note: this call will block) */
- result = Curl_ssl_cfilter_remove(data, FIRSTSOCKET);
+ /* This has only been tested on the proftpd server, and the mod_tls
+ * code sends a close notify alert without waiting for a close notify
+ * alert in response. Thus we wait for a close notify alert from the
+ * server, but we do not send one. Let's hope other servers do
+ * the same... */
+ result = Curl_ssl_cfilter_remove(data, FIRSTSOCKET,
+ (data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE));
if(result)
failf(data, "Failed to clear the command channel (CCC)");
@@ -3069,7 +3075,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
data->state.most_recent_ftp_entrypath = ftpc->entrypath;
}
else {
- /* couldn't get the path */
+ /* could not get the path */
Curl_dyn_free(&out);
infof(data, "Failed to figure out path");
}
@@ -3168,7 +3174,7 @@ static CURLcode ftp_statemachine(struct Curl_easy *data,
else {
/* return failure */
failf(data, "Server denied you to change to the given directory");
- ftpc->cwdfail = TRUE; /* don't remember this path as we failed
+ ftpc->cwdfail = TRUE; /* do not remember this path as we failed
to enter it */
result = CURLE_REMOTE_ACCESS_DENIED;
}
@@ -3373,7 +3379,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
case CURLE_REMOTE_FILE_NOT_FOUND:
case CURLE_WRITE_ERROR:
/* the connection stays alive fine even though this happened */
- case CURLE_OK: /* doesn't affect the control connection's status */
+ case CURLE_OK: /* does not affect the control connection's status */
if(!premature)
break;
@@ -3439,7 +3445,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
/* free the dir tree and file parts */
freedirs(ftpc);
- /* shut down the socket to inform the server we're done */
+ /* shut down the socket to inform the server we are done */
#ifdef _WIN32_WCE
shutdown(conn->sock[SECONDARYSOCKET], 2); /* SD_BOTH */
@@ -3457,7 +3463,7 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
}
}
- close_secondarysocket(data, conn);
+ close_secondarysocket(data);
}
if(!result && (ftp->transfer == PPTRANSFER_BODY) && ftpc->ctl_valid &&
@@ -3533,9 +3539,9 @@ static CURLcode ftp_done(struct Curl_easy *data, CURLcode status,
if((-1 != data->req.size) &&
(data->req.size != data->req.bytecount) &&
#ifdef CURL_DO_LINEEND_CONV
- /* Most FTP servers don't adjust their file SIZE response for CRLFs, so
- * we'll check to see if the discrepancy can be explained by the number
- * of CRLFs we've changed to LFs.
+ /* Most FTP servers do not adjust their file SIZE response for CRLFs,
+ * so we will check to see if the discrepancy can be explained by the
+ * number of CRLFs we have changed to LFs.
*/
((data->req.size + data->state.crlf_conversions) !=
data->req.bytecount) &&
@@ -3670,7 +3676,7 @@ static CURLcode ftp_nb_type(struct Curl_easy *data,
* ftp_pasv_verbose()
*
* This function only outputs some informationals about this second connection
- * when we've issued a PASV command before and thus we have connected to a
+ * when we have issued a PASV command before and thus we have connected to a
* possibly new IP address.
*
*/
@@ -3711,7 +3717,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
* complete */
struct FTP *ftp = NULL;
- /* if the second connection isn't done yet, wait for it to have
+ /* if the second connection is not done yet, wait for it to have
* connected to the remote host. When using proxy tunneling, this
* means the tunnel needs to have been established. However, we
* can not expect the remote host to talk to us in any way yet.
@@ -3739,20 +3745,20 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
*completep = (int)complete;
- /* if we got an error or if we don't wait for a data connection return
+ /* if we got an error or if we do not wait for a data connection return
immediately */
if(result || !ftpc->wait_data_conn)
return result;
/* if we reach the end of the FTP state machine here, *complete will be
TRUE but so is ftpc->wait_data_conn, which says we need to wait for the
- data connection and therefore we're not actually complete */
+ data connection and therefore we are not actually complete */
*completep = 0;
}
if(ftp->transfer <= PPTRANSFER_INFO) {
- /* a transfer is about to take place, or if not a file name was given
- so we'll do a SIZE on it later and then we need the right TYPE first */
+ /* a transfer is about to take place, or if not, a filename was given so we
+ will do a SIZE on it later and then we need the right TYPE first */
if(ftpc->wait_data_conn) {
bool serv_conned;
@@ -3791,7 +3797,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
result = Curl_range(data);
if(result == CURLE_OK && data->req.maxdownload >= 0) {
- /* Don't check for successful transfer */
+ /* Do not check for successful transfer */
ftpc->dont_check = TRUE;
}
@@ -3824,7 +3830,7 @@ static CURLcode ftp_do_more(struct Curl_easy *data, int *completep)
}
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
if(!ftpc->wait_data_conn) {
/* no waiting for the data connection so this is now complete */
@@ -3955,7 +3961,7 @@ static CURLcode init_wc_data(struct Curl_easy *data)
if(data->set.ftp_filemethod == FTPFILE_NOCWD)
data->set.ftp_filemethod = FTPFILE_MULTICWD;
- /* try to parse ftp url */
+ /* try to parse ftp URL */
result = ftp_parse_url_path(data);
if(result) {
goto fail;
@@ -4073,7 +4079,7 @@ static CURLcode wc_statemach(struct Curl_easy *data)
if(result)
return result;
- /* we don't need the Curl_fileinfo of first file anymore */
+ /* we do not need the Curl_fileinfo of first file anymore */
Curl_llist_remove(&wildcard->filelist, wildcard->filelist.head, NULL);
if(wildcard->filelist.size == 0) { /* remains only one file to down. */
@@ -4228,7 +4234,7 @@ static CURLcode ftp_disconnect(struct Curl_easy *data,
bad in any way, sending quit and waiting around here will make the
disconnect wait in vain and cause more problems than we need to.
- ftp_quit() will check the state of ftp->ctl_valid. If it's ok it
+ ftp_quit() will check the state of ftp->ctl_valid. If it is ok it
will try to send the QUIT command, otherwise it will just return.
*/
if(dead_connection)
@@ -4323,10 +4329,10 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data)
}
ftpc->dirdepth = 1; /* we consider it to be a single dir */
- fileName = slashPos + 1; /* rest is file name */
+ fileName = slashPos + 1; /* rest is filename */
}
else
- fileName = rawPath; /* file name only (or empty) */
+ fileName = rawPath; /* filename only (or empty) */
break;
default: /* allow pretty much anything */
@@ -4357,7 +4363,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data)
++compLen;
/* we skip empty path components, like "x//y" since the FTP command
- CWD requires a parameter and a non-existent parameter a) doesn't
+ CWD requires a parameter and a non-existent parameter a) does not
work on many servers and b) has no effect on the others. */
if(compLen > 0) {
char *comp = Curl_memdup0(curPos, compLen);
@@ -4371,7 +4377,7 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data)
}
}
DEBUGASSERT((size_t)ftpc->dirdepth <= dirAlloc);
- fileName = curPos; /* the rest is the file name (or empty) */
+ fileName = curPos; /* the rest is the filename (or empty) */
}
break;
} /* switch */
@@ -4383,8 +4389,8 @@ CURLcode ftp_parse_url_path(struct Curl_easy *data)
we make it a NULL pointer */
if(data->state.upload && !ftpc->file && (ftp->transfer == PPTRANSFER_BODY)) {
- /* We need a file name when uploading. Return error! */
- failf(data, "Uploading to a URL without a file name");
+ /* We need a filename when uploading. Return error! */
+ failf(data, "Uploading to a URL without a filename");
free(rawPath);
return CURLE_URL_MALFORMAT;
}
@@ -4425,16 +4431,16 @@ static CURLcode ftp_dophase_done(struct Curl_easy *data, bool connected)
CURLcode result = ftp_do_more(data, &completed);
if(result) {
- close_secondarysocket(data, conn);
+ close_secondarysocket(data);
return result;
}
}
if(ftp->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
else if(!connected)
- /* since we didn't connect now, we want do_more to get called */
+ /* since we did not connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
ftpc->ctl_valid = TRUE; /* seems good */
@@ -4539,10 +4545,10 @@ static CURLcode ftp_setup_connection(struct Curl_easy *data,
}
data->req.p.ftp = ftp;
- ftp->path = &data->state.up.path[1]; /* don't include the initial slash */
+ ftp->path = &data->state.up.path[1]; /* do not include the initial slash */
/* FTP URLs support an extension like ";type=<typecode>" that
- * we'll try to get now! */
+ * we will try to get now! */
type = strstr(ftp->path, ";type=");
if(!type)
diff --git a/libs/libcurl/src/ftp.h b/libs/libcurl/src/ftp.h
index dfd17fcc46..ebbe2fc48d 100644
--- a/libs/libcurl/src/ftp.h
+++ b/libs/libcurl/src/ftp.h
@@ -61,7 +61,7 @@ enum {
FTP_STOR_PREQUOTE,
FTP_POSTQUOTE,
FTP_CWD, /* change dir */
- FTP_MKD, /* if the dir didn't exist */
+ FTP_MKD, /* if the dir did not exist */
FTP_MDTM, /* to figure out the datestamp */
FTP_TYPE, /* to set type when doing a head-like request */
FTP_LIST_TYPE, /* set type when about to do a dir list */
@@ -123,7 +123,7 @@ struct ftp_conn {
char *account;
char *alternative_to_user;
char *entrypath; /* the PWD reply when we logged on */
- char *file; /* url-decoded file name (or path) */
+ char *file; /* url-decoded filename (or path) */
char **dirs; /* realloc()ed array for path components */
char *newhost;
char *prevpath; /* url-decoded conn->path from the previous transfer */
@@ -139,7 +139,7 @@ struct ftp_conn {
int count1; /* general purpose counter for the state machine */
int count2; /* general purpose counter for the state machine */
int count3; /* general purpose counter for the state machine */
- /* newhost is the (allocated) IP addr or host name to connect the data
+ /* newhost is the (allocated) IP addr or hostname to connect the data
connection to */
unsigned short newport;
ftpstate state; /* always use ftp.c:state() to change state! */
diff --git a/libs/libcurl/src/getenv.c b/libs/libcurl/src/getenv.c
index 51faaad73b..d3c660c0c4 100644
--- a/libs/libcurl/src/getenv.c
+++ b/libs/libcurl/src/getenv.c
@@ -37,7 +37,7 @@ static char *GetEnv(const char *variable)
return NULL;
#elif defined(_WIN32)
/* This uses Windows API instead of C runtime getenv() to get the environment
- variable since some changes aren't always visible to the latter. #4774 */
+ variable since some changes are not always visible to the latter. #4774 */
char *buf = NULL;
char *tmp;
DWORD bufsize;
@@ -54,8 +54,8 @@ static char *GetEnv(const char *variable)
buf = tmp;
bufsize = rc;
- /* It's possible for rc to be 0 if the variable was found but empty.
- Since getenv doesn't make that distinction we ignore it as well. */
+ /* It is possible for rc to be 0 if the variable was found but empty.
+ Since getenv does not make that distinction we ignore it as well. */
rc = GetEnvironmentVariableA(variable, buf, bufsize);
if(!rc || rc == bufsize || rc > max) {
free(buf);
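
GetEnv() keeps calling GetEnvironmentVariableA(), growing the buffer whenever the return value reports that more space is needed. A standalone, Windows-only sketch of a simpler query-then-fetch variant of the same API contract (helper name hypothetical; the race where the value grows between the two calls is ignored):

#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

/* return a malloc()ed copy of the variable's value, or NULL */
static char *get_env_copy(const char *name)
{
  char *buf;
  /* first call with a zero-sized buffer reports the required size,
     including the terminating NUL, or 0 if the variable is not set */
  DWORD needed = GetEnvironmentVariableA(name, NULL, 0);
  if(!needed)
    return NULL;
  buf = malloc(needed);
  if(!buf)
    return NULL;
  /* second call fills the buffer; a 0 return means failure or an empty
     value, both treated as absent here, mirroring the comment above */
  if(!GetEnvironmentVariableA(name, buf, needed)) {
    free(buf);
    return NULL;
  }
  return buf;
}

int main(void)
{
  char *value = get_env_copy("PATH");
  if(value) {
    printf("%s\n", value);
    free(value);
  }
  return 0;
}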
diff --git a/libs/libcurl/src/getinfo.c b/libs/libcurl/src/getinfo.c
index 689e65b939..08d9a332f6 100644
--- a/libs/libcurl/src/getinfo.c
+++ b/libs/libcurl/src/getinfo.c
@@ -204,7 +204,7 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info,
#ifdef DEBUGBUILD
char *timestr = getenv("CURL_TIME");
if(timestr) {
- unsigned long val = strtol(timestr, NULL, 10);
+ unsigned long val = strtoul(timestr, NULL, 10);
switch(info) {
case CURLINFO_LOCAL_PORT:
*param_longp = (long)val;
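
The CURL_TIME and CURL_DEBUG_SIZE overrides are parsed into an unsigned long, so strtoul() is the matching conversion rather than strtol(). A standalone illustration of why the distinction matters at the top of the unsigned range:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  char text[32];
  unsigned long uval;
  long sval;

  /* print the largest unsigned long, then parse it back both ways */
  snprintf(text, sizeof(text), "%lu", ULONG_MAX);

  errno = 0;
  sval = strtol(text, NULL, 10);   /* overflows: clamps to LONG_MAX, ERANGE */
  printf("strtol : %ld (errno=%d)\n", sval, errno);

  errno = 0;
  uval = strtoul(text, NULL, 10);  /* fits: full unsigned range, errno 0 */
  printf("strtoul: %lu (errno=%d)\n", uval, errno);

  return 0;
}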
@@ -216,7 +216,7 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info,
/* use another variable for this to allow different values */
timestr = getenv("CURL_DEBUG_SIZE");
if(timestr) {
- unsigned long val = strtol(timestr, NULL, 10);
+ unsigned long val = strtoul(timestr, NULL, 10);
switch(info) {
case CURLINFO_HEADER_SIZE:
case CURLINFO_REQUEST_SIZE:
@@ -277,8 +277,8 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info,
case CURLINFO_LASTSOCKET:
sockfd = Curl_getconnectinfo(data, NULL);
- /* note: this is not a good conversion for systems with 64 bit sockets and
- 32 bit longs */
+ /* note: this is not a good conversion for systems with 64-bit sockets and
+ 32-bit longs */
if(sockfd != CURL_SOCKET_BAD)
*param_longp = (long)sockfd;
else
@@ -335,7 +335,7 @@ static CURLcode getinfo_long(struct Curl_easy *data, CURLINFO info,
}
break;
case CURLINFO_PROTOCOL:
- *param_longp = data->info.conn_protocol;
+ *param_longp = (long)data->info.conn_protocol;
break;
case CURLINFO_USED_PROXY:
*param_longp =
@@ -361,7 +361,7 @@ static CURLcode getinfo_offt(struct Curl_easy *data, CURLINFO info,
#ifdef DEBUGBUILD
char *timestr = getenv("CURL_TIME");
if(timestr) {
- unsigned long val = strtol(timestr, NULL, 10);
+ unsigned long val = strtoul(timestr, NULL, 10);
switch(info) {
case CURLINFO_TOTAL_TIME_T:
case CURLINFO_NAMELOOKUP_TIME_T:
@@ -450,7 +450,7 @@ static CURLcode getinfo_double(struct Curl_easy *data, CURLINFO info,
#ifdef DEBUGBUILD
char *timestr = getenv("CURL_TIME");
if(timestr) {
- unsigned long val = strtol(timestr, NULL, 10);
+ unsigned long val = strtoul(timestr, NULL, 10);
switch(info) {
case CURLINFO_TOTAL_TIME:
case CURLINFO_NAMELOOKUP_TIME:
diff --git a/libs/libcurl/src/gopher.c b/libs/libcurl/src/gopher.c
index 5d01eb0046..70cd99cf3b 100644
--- a/libs/libcurl/src/gopher.c
+++ b/libs/libcurl/src/gopher.c
@@ -209,9 +209,9 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done)
if(!timeout_ms)
timeout_ms = TIMEDIFF_T_MAX;
- /* Don't busyloop. The entire loop thing is a work-around as it causes a
+ /* Do not busyloop. The entire loop thing is a work-around as it causes a
BLOCKING behavior which is a NO-NO. This function should rather be
- split up in a do and a doing piece where the pieces that aren't
+ split up in a do and a doing piece where the pieces that are not
possible to send now will be sent in the doing function repeatedly
until the entire request is sent.
*/
@@ -238,7 +238,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done)
if(result)
return result;
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
return CURLE_OK;
}
#endif /* CURL_DISABLE_GOPHER */
diff --git a/libs/libcurl/src/hash.c b/libs/libcurl/src/hash.c
index dc0fb62e1e..ca55a4bb04 100644
--- a/libs/libcurl/src/hash.c
+++ b/libs/libcurl/src/hash.c
@@ -40,7 +40,10 @@ hash_element_dtor(void *user, void *element)
struct Curl_hash_element *e = (struct Curl_hash_element *) element;
if(e->ptr) {
- h->dtor(e->ptr);
+ if(e->dtor)
+ e->dtor(e->key, e->key_len, e->ptr);
+ else
+ h->dtor(e->ptr);
e->ptr = NULL;
}
@@ -77,7 +80,8 @@ Curl_hash_init(struct Curl_hash *h,
}
static struct Curl_hash_element *
-mk_hash_element(const void *key, size_t key_len, const void *p)
+mk_hash_element(const void *key, size_t key_len, const void *p,
+ Curl_hash_elem_dtor dtor)
{
/* allocate the struct plus memory after it to store the key */
struct Curl_hash_element *he = malloc(sizeof(struct Curl_hash_element) +
@@ -87,22 +91,15 @@ mk_hash_element(const void *key, size_t key_len, const void *p)
memcpy(he->key, key, key_len);
he->key_len = key_len;
he->ptr = (void *) p;
+ he->dtor = dtor;
}
return he;
}
#define FETCH_LIST(x,y,z) &x->table[x->hash_func(y, z, x->slots)]
-/* Insert the data in the hash. If there already was a match in the hash, that
- * data is replaced. This function also "lazily" allocates the table if
- * needed, as it isn't done in the _init function (anymore).
- *
- * @unittest: 1305
- * @unittest: 1602
- * @unittest: 1603
- */
-void *
-Curl_hash_add(struct Curl_hash *h, void *key, size_t key_len, void *p)
+void *Curl_hash_add2(struct Curl_hash *h, void *key, size_t key_len, void *p,
+ Curl_hash_elem_dtor dtor)
{
struct Curl_hash_element *he;
struct Curl_llist_element *le;
@@ -130,7 +127,7 @@ Curl_hash_add(struct Curl_hash *h, void *key, size_t key_len, void *p)
}
}
- he = mk_hash_element(key, key_len, p);
+ he = mk_hash_element(key, key_len, p, dtor);
if(he) {
Curl_llist_append(l, he, &he->list);
++h->size;
@@ -140,6 +137,20 @@ Curl_hash_add(struct Curl_hash *h, void *key, size_t key_len, void *p)
return NULL; /* failure */
}
+/* Insert the data in the hash. If there already was a match in the hash, that
+ * data is replaced. This function also "lazily" allocates the table if
+ * needed, as it is not done in the _init function (anymore).
+ *
+ * @unittest: 1305
+ * @unittest: 1602
+ * @unittest: 1603
+ */
+void *
+Curl_hash_add(struct Curl_hash *h, void *key, size_t key_len, void *p)
+{
+ return Curl_hash_add2(h, key, key_len, p, NULL);
+}
+
/* Remove the identified hash entry.
* Returns non-zero on failure.
*
@@ -259,8 +270,9 @@ size_t Curl_hash_str(void *key, size_t key_length, size_t slots_num)
size_t h = 5381;
while(key_str < end) {
+ size_t j = (size_t)*key_str++;
h += h << 5;
- h ^= *key_str++;
+ h ^= j;
}
return (h % slots_num);
diff --git a/libs/libcurl/src/hash.h b/libs/libcurl/src/hash.h
index a6e05cae57..7e2abb38bf 100644
--- a/libs/libcurl/src/hash.h
+++ b/libs/libcurl/src/hash.h
@@ -58,9 +58,12 @@ struct Curl_hash {
size_t size;
};
+typedef void (*Curl_hash_elem_dtor)(void *key, size_t key_len, void *p);
+
struct Curl_hash_element {
struct Curl_llist_element list;
void *ptr;
+ Curl_hash_elem_dtor dtor;
size_t key_len;
char key[1]; /* allocated memory following the struct */
};
@@ -78,6 +81,8 @@ void Curl_hash_init(struct Curl_hash *h,
Curl_hash_dtor dtor);
void *Curl_hash_add(struct Curl_hash *h, void *key, size_t key_len, void *p);
+void *Curl_hash_add2(struct Curl_hash *h, void *key, size_t key_len, void *p,
+ Curl_hash_elem_dtor dtor);
int Curl_hash_delete(struct Curl_hash *h, void *key, size_t key_len);
void *Curl_hash_pick(struct Curl_hash *, void *key, size_t key_len);
#define Curl_hash_count(h) ((h)->size)
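
Curl_hash_add2() lets an individual entry carry its own destructor with the (key, key_len, ptr) signature, and hash_element_dtor() prefers that per-element callback over the table-wide one. A self-contained miniature of the same dispatch, using toy types rather than the real Curl_hash structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy mirror of the new scheme: an element may carry its own destructor;
   when it is NULL, the table-wide destructor is used instead */
typedef void (*elem_dtor)(void *key, size_t key_len, void *p);

struct toy_element {
  void *ptr;
  elem_dtor dtor;   /* optional per-element destructor */
  size_t key_len;
  char key[16];
};

static void table_wide_dtor(void *p)
{
  printf("table-wide dtor\n");
  free(p);
}

static void per_element_dtor(void *key, size_t key_len, void *p)
{
  printf("per-element dtor for key '%.*s'\n", (int)key_len, (char *)key);
  free(p);
}

static void destroy_element(struct toy_element *e)
{
  if(e->ptr) {
    if(e->dtor)
      e->dtor(e->key, e->key_len, e->ptr);  /* element-specific wins */
    else
      table_wide_dtor(e->ptr);              /* fallback */
    e->ptr = NULL;
  }
}

int main(void)
{
  struct toy_element plain = { strdup("one"), NULL, 1, "a" };
  struct toy_element special = { strdup("two"), per_element_dtor, 1, "b" };
  destroy_element(&plain);
  destroy_element(&special);
  return 0;
}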
diff --git a/libs/libcurl/src/headers.c b/libs/libcurl/src/headers.c
index 3a4ce3adfe..92f1f91938 100644
--- a/libs/libcurl/src/headers.c
+++ b/libs/libcurl/src/headers.c
@@ -54,7 +54,7 @@ static void copy_header_external(struct Curl_header_store *hs,
impossible for applications to do == comparisons, as that would otherwise
be very tempting and then lead to the reserved bits not being reserved
anymore. */
- h->origin = hs->type | (1<<27);
+ h->origin = (unsigned int)(hs->type | (1<<27));
h->anchor = e;
}
@@ -114,7 +114,7 @@ CURLHcode curl_easy_header(CURL *easy,
break;
}
}
- if(!e) /* this shouldn't happen */
+ if(!e) /* this should not happen */
return CURLHE_MISSING;
}
/* this is the name we want */
@@ -302,7 +302,7 @@ CURLcode Curl_headers_push(struct Curl_easy *data, const char *header,
/* line folding, append value to the previous header's value */
return unfold_value(data, header, hlen);
else {
- /* Can't unfold without a previous header. Instead of erroring, just
+ /* cannot unfold without a previous header. Instead of erroring, just
pass the leading blanks. */
while(hlen && ISBLANK(*header)) {
header++;
diff --git a/libs/libcurl/src/hmac.c b/libs/libcurl/src/hmac.c
index 48ddb42ab4..6deb339cfa 100644
--- a/libs/libcurl/src/hmac.c
+++ b/libs/libcurl/src/hmac.c
@@ -42,7 +42,7 @@
* Generic HMAC algorithm.
*
* This module computes HMAC digests based on any hash function. Parameters
- * and computing procedures are set-up dynamically at HMAC computation context
+ * and computing procedures are set up dynamically at HMAC computation context
* initialization.
*/
diff --git a/libs/libcurl/src/hostip.c b/libs/libcurl/src/hostip.c
index d22eb4120b..fcd21c9a32 100644
--- a/libs/libcurl/src/hostip.c
+++ b/libs/libcurl/src/hostip.c
@@ -84,8 +84,8 @@
* source file are these:
*
* CURLRES_IPV6 - this host has getaddrinfo() and family, and thus we use
- * that. The host may not be able to resolve IPv6, but we don't really have to
- * take that into account. Hosts that aren't IPv6-enabled have CURLRES_IPV4
+ * that. The host may not be able to resolve IPv6, but we do not really have to
+ * take that into account. Hosts that are not IPv6-enabled have CURLRES_IPV4
* defined.
*
* CURLRES_ARES - is defined if libcurl is built to use c-ares for
@@ -238,7 +238,7 @@ void Curl_hostcache_prune(struct Curl_easy *data)
int timeout = data->set.dns_cache_timeout;
if(!data->dns.hostcache)
- /* NULL hostcache means we can't do it */
+ /* NULL hostcache means we cannot do it */
return;
if(data->share)
@@ -283,14 +283,14 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
size_t entry_len = create_hostcache_id(hostname, 0, port,
entry_id, sizeof(entry_id));
- /* See if it's already in our dns cache */
+ /* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
/* No entry found in cache, check if we might have a wildcard entry */
if(!dns && data->state.wildcard_resolve) {
entry_len = create_hostcache_id("*", 1, port, entry_id, sizeof(entry_id));
- /* See if it's already in our dns cache */
+ /* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
}
@@ -329,7 +329,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
}
if(!found) {
- infof(data, "Hostname in DNS cache doesn't have needed family, zapped");
+ infof(data, "Hostname in DNS cache does not have needed family, zapped");
dns = NULL; /* the memory deallocation is being handled by the hash */
Curl_hash_delete(data->dns.hostcache, entry_id, entry_len + 1);
}
@@ -349,7 +349,7 @@ static struct Curl_dns_entry *fetch_addr(struct Curl_easy *data,
* Returns the Curl_dns_entry entry pointer or NULL if not in the cache.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
- * use, or we'll leak memory!
+ * use, or we will leak memory!
*/
struct Curl_dns_entry *
Curl_fetch_addr(struct Curl_easy *data,
@@ -428,8 +428,8 @@ UNITTEST CURLcode Curl_shuffle_addr(struct Curl_easy *data,
if(Curl_rand(data, (unsigned char *)rnd, rnd_size) == CURLE_OK) {
struct Curl_addrinfo *swap_tmp;
for(i = num_addrs - 1; i > 0; i--) {
- swap_tmp = nodes[rnd[i] % (i + 1)];
- nodes[rnd[i] % (i + 1)] = nodes[i];
+ swap_tmp = nodes[rnd[i] % (unsigned int)(i + 1)];
+ nodes[rnd[i] % (unsigned int)(i + 1)] = nodes[i];
nodes[i] = swap_tmp;
}
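
Curl_shuffle_addr() performs a Fisher-Yates shuffle driven by random bytes, and the added casts keep the modulo operands unsigned. A standalone sketch of the same shuffle using rand() in place of Curl_rand(), so it is illustrative rather than cryptographically meaningful:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* shuffle an int array in place, swapping each slot with a random
   earlier-or-equal slot (Fisher-Yates) */
static void shuffle(int *v, size_t n)
{
  size_t i;
  for(i = n - 1; i > 0; i--) {
    size_t j = (size_t)rand() % (i + 1);
    int tmp = v[j];
    v[j] = v[i];
    v[i] = tmp;
  }
}

int main(void)
{
  int v[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  size_t i;
  srand((unsigned int)time(NULL));
  shuffle(v, sizeof(v) / sizeof(v[0]));
  for(i = 0; i < sizeof(v) / sizeof(v[0]); i++)
    printf("%d ", v[i]);
  printf("\n");
  return 0;
}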
@@ -536,8 +536,8 @@ static struct Curl_addrinfo *get_localhost6(int port, const char *name)
sa6.sin6_port = htons(port16);
sa6.sin6_flowinfo = 0;
sa6.sin6_scope_id = 0;
- if(Curl_inet_pton(AF_INET6, "::1", ipv6) < 1)
- return NULL;
+
+ (void)Curl_inet_pton(AF_INET6, "::1", ipv6);
memcpy(&sa6.sin6_addr, ipv6, sizeof(ipv6));
ca->ai_flags = 0;
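
The return-value check is dropped and the call cast to void, presumably because converting the fixed literal "::1" cannot fail. A standalone check with POSIX inet_pton(), which Curl_inet_pton wraps or reimplements (POSIX headers assumed):

#include <arpa/inet.h>
#include <assert.h>

int main(void)
{
  unsigned char ipv6[16];

  /* "::1" is a fixed, well-formed literal, so conversion always succeeds */
  int rc = inet_pton(AF_INET6, "::1", ipv6);
  assert(rc == 1);

  /* the IPv6 loopback is fifteen zero bytes followed by 0x01 */
  assert(ipv6[0] == 0 && ipv6[15] == 1);
  return 0;
}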
@@ -602,7 +602,7 @@ static struct Curl_addrinfo *get_localhost(int port, const char *name)
bool Curl_ipv6works(struct Curl_easy *data)
{
if(data) {
+ /* the nature of most systems is that IPv6 status does not come and go
+ /* the nature of most system is that IPv6 status does not come and go
during a program's lifetime so we only probe the first time and then we
have the info kept for fast reuse */
DEBUGASSERT(data);
@@ -618,7 +618,7 @@ bool Curl_ipv6works(struct Curl_easy *data)
/* probe to see if we have a working IPv6 stack */
curl_socket_t s = socket(PF_INET6, SOCK_DGRAM, 0);
if(s == CURL_SOCKET_BAD)
- /* an IPv6 address was requested but we can't get/use one */
+ /* an IPv6 address was requested but we cannot get/use one */
ipv6_works = 0;
else {
ipv6_works = 1;
@@ -662,11 +662,11 @@ static bool tailmatch(const char *full, const char *part)
/*
* Curl_resolv() is the main name resolve function within libcurl. It resolves
* a name and returns a pointer to the entry in the 'entry' argument (if one
- * is provided). This function might return immediately if we're using asynch
+ * is provided). This function might return immediately if we are using asynch
* resolves. See the return codes.
*
* The cache entry we return will get its 'inuse' counter increased when this
- * function is used. You MUST call Curl_resolv_unlock() later (when you're
+ * function is used. You MUST call Curl_resolv_unlock() later (when you are
* done using this struct) to decrease the counter again.
*
* Return codes:
@@ -813,7 +813,7 @@ enum resolve_t Curl_resolv(struct Curl_easy *data,
if(respwait) {
/* the response to our resolve call will come asynchronously at
a later time, good or bad */
- /* First, check that we haven't received the info by now */
+ /* First, check that we have not received the info by now */
result = Curl_resolv_check(data, &dns);
if(result) /* error detected */
return CURLRESOLV_ERROR;
@@ -851,7 +851,7 @@ enum resolve_t Curl_resolv(struct Curl_easy *data,
#ifdef USE_ALARM_TIMEOUT
/*
* This signal handler jumps back into the main libcurl code and continues
- * execution. This effectively causes the remainder of the application to run
+ * execution. This effectively causes the remainder of the application to run
* within a signal handler which is nonportable and could lead to problems.
*/
CURL_NORETURN static
@@ -864,11 +864,11 @@ void alarmfunc(int sig)
/*
* Curl_resolv_timeout() is the same as Curl_resolv() but specifies a
- * timeout. This function might return immediately if we're using asynch
+ * timeout. This function might return immediately if we are using asynch
* resolves. See the return codes.
*
* The cache entry we return will get its 'inuse' counter increased when this
- * function is used. You MUST call Curl_resolv_unlock() later (when you're
+ * function is used. You MUST call Curl_resolv_unlock() later (when you are
* done using this struct) to decrease the counter again.
*
* If built with a synchronous resolver and use of signals is not
@@ -934,7 +934,7 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
will generate a signal and we will siglongjmp() from that here.
This technique has problems (see alarmfunc).
This should be the last thing we do before calling Curl_resolv(),
- as otherwise we'd have to worry about variables that get modified
+ as otherwise we would have to worry about variables that get modified
before we invoke Curl_resolv() (and thus use "volatile"). */
curl_simple_lock_lock(&curl_jmpenv_lock);
@@ -955,7 +955,7 @@ enum resolve_t Curl_resolv_timeout(struct Curl_easy *data,
keep_copysig = TRUE; /* yes, we have a copy */
sigact.sa_handler = alarmfunc;
#ifdef SA_RESTART
- /* HPUX doesn't have SA_RESTART but defaults to that behavior! */
+ /* HPUX does not have SA_RESTART but defaults to that behavior! */
sigact.sa_flags &= ~SA_RESTART;
#endif
/* now set the new struct */
@@ -1022,7 +1022,7 @@ clean_up:
((alarm_set >= 0x80000000) && (prev_alarm < 0x80000000)) ) {
/* if the alarm time-left reached zero or turned "negative" (counted
with unsigned values), we should fire off a SIGALRM here, but we
- won't, and zero would be to switch it off so we never set it to
+ will not, and zero would be to switch it off so we never set it to
less than 1! */
alarm(1);
rc = CURLRESOLV_TIMEDOUT;
@@ -1150,7 +1150,7 @@ CURLcode Curl_loadhostpairs(struct Curl_easy *data)
if(data->share)
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
- /* delete entry, ignore if it didn't exist */
+ /* delete entry, ignore if it did not exist */
Curl_hash_delete(data->dns.hostcache, entry_id, entry_len + 1);
if(data->share)
@@ -1264,7 +1264,7 @@ err:
if(data->share)
Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);
- /* See if it's already in our dns cache */
+ /* See if it is already in our dns cache */
dns = Curl_hash_pick(data->dns.hostcache, entry_id, entry_len + 1);
if(dns) {
@@ -1362,7 +1362,7 @@ static void show_resolve_info(struct Curl_easy *data,
if(!result)
result = Curl_dyn_add(d, buf);
if(result) {
- infof(data, "too many IP, can't show");
+ infof(data, "too many IP, cannot show");
goto fail;
}
}
diff --git a/libs/libcurl/src/hostip.h b/libs/libcurl/src/hostip.h
index e1a7615bea..8b46b9a76c 100644
--- a/libs/libcurl/src/hostip.h
+++ b/libs/libcurl/src/hostip.h
@@ -80,7 +80,7 @@ struct Curl_https_rrinfo {
char *alpns; /* keytag = 1 */
bool no_def_alpn; /* keytag = 2 */
/*
- * we don't support ports (keytag = 3) as we don't support
+ * we do not support ports (keytag = 3) as we do not support
* port-switching yet
*/
unsigned char *ipv4hints; /* keytag = 4 */
@@ -97,7 +97,7 @@ struct Curl_dns_entry {
#ifdef USE_HTTPSRR
struct Curl_https_rrinfo *hinfo;
#endif
- /* timestamp == 0 -- permanent CURLOPT_RESOLVE entry (doesn't time out) */
+ /* timestamp == 0 -- permanent CURLOPT_RESOLVE entry (does not time out) */
time_t timestamp;
/* use-counter, use Curl_resolv_unlock to release reference */
long inuse;
@@ -114,7 +114,7 @@ bool Curl_host_is_ipnum(const char *hostname);
* and port.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
- * use, or we'll leak memory!
+ * use, or we will leak memory!
*/
/* return codes */
enum resolve_t {
@@ -200,7 +200,7 @@ void Curl_printable_address(const struct Curl_addrinfo *ip,
* Returns the Curl_dns_entry entry pointer or NULL if not in the cache.
*
* The returned data *MUST* be "unlocked" with Curl_resolv_unlock() after
- * use, or we'll leak memory!
+ * use, or we will leak memory!
*/
struct Curl_dns_entry *
Curl_fetch_addr(struct Curl_easy *data,
diff --git a/libs/libcurl/src/hostip4.c b/libs/libcurl/src/hostip4.c
index 0d28d54a2c..f13c5e7405 100644
--- a/libs/libcurl/src/hostip4.c
+++ b/libs/libcurl/src/hostip4.c
@@ -62,7 +62,7 @@ bool Curl_ipvalid(struct Curl_easy *data, struct connectdata *conn)
{
(void)data;
if(conn->ip_version == CURL_IPRESOLVE_V6)
- /* An IPv6 address was requested and we can't get/use one */
+ /* An IPv6 address was requested and we cannot get/use one */
return FALSE;
return TRUE; /* OK, proceed */
@@ -193,8 +193,8 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
* small. Previous versions are known to return ERANGE for the same
* problem.
*
- * This wouldn't be such a big problem if older versions wouldn't
- * sometimes return EAGAIN on a common failure case. Alas, we can't
+ * This would not be such a big problem if older versions would not
+ * sometimes return EAGAIN on a common failure case. Alas, we cannot
* assume that EAGAIN *or* ERANGE means ERANGE for any given version of
* glibc.
*
@@ -210,9 +210,9 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
* gethostbyname_r() in glibc:
*
* In glibc 2.2.5 the interface is different (this has also been
- * discovered in glibc 2.1.1-6 as shipped by Redhat 6). What I can't
+ * discovered in glibc 2.1.1-6 as shipped by Redhat 6). What I cannot
* explain, is that tests performed on glibc 2.2.4-34 and 2.2.4-32
- * (shipped/upgraded by Redhat 7.2) don't show this behavior!
+ * (shipped/upgraded by Redhat 7.2) do not show this behavior!
*
* In this "buggy" version, the return code is -1 on error and 'errno'
* is set to the ERANGE or EAGAIN code. Note that 'errno' is not a
@@ -223,7 +223,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
#elif defined(HAVE_GETHOSTBYNAME_R_3)
/* AIX, Digital Unix/Tru64, HPUX 10, more? */
- /* For AIX 4.3 or later, we don't use gethostbyname_r() at all, because of
+ /* For AIX 4.3 or later, we do not use gethostbyname_r() at all, because of
* the plain fact that it does not return unique full buffers on each
* call, but instead several of the pointers in the hostent structs will
* point to the same actual data! This has the unfortunate down-side that
@@ -237,7 +237,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
*
* Troels Walsted Hansen helped us work this out on March 3rd, 2003.
*
- * [*] = much later we've found out that it isn't at all "completely
+ * [*] = much later we have found out that it is not at all "completely
* thread-safe", but at least the gethostbyname() function is.
*/
@@ -253,7 +253,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
(struct hostent *)buf,
(struct hostent_data *)((char *)buf +
sizeof(struct hostent)));
- h_errnop = SOCKERRNO; /* we don't deal with this, but set it anyway */
+ h_errnop = SOCKERRNO; /* we do not deal with this, but set it anyway */
}
else
res = -1; /* failure, too smallish buffer size */
@@ -263,8 +263,8 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
h = buf; /* result expected in h */
/* This is the worst kind of the different gethostbyname_r() interfaces.
- * Since we don't know how big buffer this particular lookup required,
- * we can't realloc down the huge alloc without doing closer analysis of
+ * Since we do not know how big buffer this particular lookup required,
+ * we cannot realloc down the huge alloc without doing closer analysis of
* the returned data. Thus, we always use CURL_HOSTENT_SIZE for every
* name lookup. Fixing this would require an extra malloc() and then
* calling Curl_addrinfo_copy() that subsequent realloc()s down the new
@@ -280,7 +280,7 @@ struct Curl_addrinfo *Curl_ipv4_resolve_r(const char *hostname,
#else /* (HAVE_GETADDRINFO && HAVE_GETADDRINFO_THREADSAFE) ||
HAVE_GETHOSTBYNAME_R */
/*
- * Here is code for platforms that don't have a thread safe
+ * Here is code for platforms that do not have a thread safe
* getaddrinfo() nor gethostbyname_r() function or for which
* gethostbyname() is the preferred one.
*/
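The comment changes above walk through the historical gethostbyname_r() calling conventions and why the oversized CURL_HOSTENT_SIZE buffer is kept. As a minimal sketch of the portability problem, this is roughly what the glibc-style 6-argument variant looks like with the ERANGE/EAGAIN grow-and-retry loop the comment describes; the function name, sizes and error handling are illustrative, not curl's code.

    /* Illustration only: glibc-style 6-argument gethostbyname_r() with the
       grow-and-retry loop described in the comment above. */
    #include <errno.h>
    #include <netdb.h>
    #include <stdlib.h>

    static struct hostent *resolve_ipv4(const char *name,
                                        struct hostent *he, char **scratch)
    {
      size_t buflen = 1024;
      char *buf = malloc(buflen);
      struct hostent *result = NULL;
      int h_errnop = 0;

      while(buf) {
        int rc = gethostbyname_r(name, he, buf, buflen, &result, &h_errnop);
        if(rc == ERANGE || rc == EAGAIN) {
          /* buffer too small (EAGAIN on some old glibc versions), retry */
          char *bigger = realloc(buf, buflen * 2);
          if(!bigger)
            break;
          buf = bigger;
          buflen *= 2;
          continue;
        }
        if(rc || !result)
          break;                 /* hard failure or host not found */
        *scratch = buf;          /* caller frees after using 'he' */
        return result;
      }
      free(buf);
      return NULL;
    }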
diff --git a/libs/libcurl/src/hsts.c b/libs/libcurl/src/hsts.c
index 8a871f77a4..635bd6100d 100644
--- a/libs/libcurl/src/hsts.c
+++ b/libs/libcurl/src/hsts.c
@@ -54,7 +54,7 @@
#define MAX_HSTS_DATELENSTR "64"
#define UNLIMITED "unlimited"
-#ifdef DEBUGBUILD
+#if defined(DEBUGBUILD) || defined(UNITTESTS)
/* to play well with debug builds, we can *set* a fixed time this will
return */
time_t deltatime; /* allow for "adjustments" for unit test purposes */
@@ -241,7 +241,7 @@ CURLcode Curl_hsts_parse(struct hsts *h, const char *hostname,
}
/*
- * Return TRUE if the given host name is currently an HSTS one.
+ * Return TRUE if the given hostname is currently an HSTS one.
*
* The 'subdomain' argument tells the function if subdomain matching should be
* attempted.
@@ -368,7 +368,7 @@ CURLcode Curl_hsts_save(struct Curl_easy *data, struct hsts *h,
file = h->filename;
if((h->flags & CURLHSTS_READONLYFILE) || !file || !file[0])
- /* marked as read-only, no file or zero length file name */
+ /* marked as read-only, no file or zero length filename */
goto skipsave;
result = Curl_fopen(data, file, &out, &tempstore);
@@ -393,7 +393,7 @@ CURLcode Curl_hsts_save(struct Curl_easy *data, struct hsts *h,
free(tempstore);
skipsave:
if(data->set.hsts_write) {
- /* if there's a write callback */
+ /* if there is a write callback */
struct curl_index i; /* count */
i.total = h->list.size;
i.index = 0;
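The loop this hunk belongs to hands every cached entry to the application's hsts_write callback together with a struct curl_index progress counter. For reference, a sketch of what such a callback looks like on the public side of that interface; the FILE-based output is illustrative and the snippet assumes an easy handle h and an open FILE *fp.

    /* Application-side HSTS write callback (public libcurl API). */
    #include <stdio.h>
    #include <curl/curl.h>

    static CURLSTScode hsts_writer(CURL *easy, struct curl_hstsentry *sts,
                                   struct curl_index *count, void *clientp)
    {
      FILE *out = (FILE *)clientp;
      (void)easy;
      fprintf(out, "%s%s expires=%s (%zu/%zu)\n",
              sts->includeSubDomains ? "." : "", sts->name, sts->expire,
              count->index + 1, count->total);
      return CURLSTS_OK;
    }

    /* usage:
       curl_easy_setopt(h, CURLOPT_HSTS_CTRL, (long)CURLHSTS_ENABLE);
       curl_easy_setopt(h, CURLOPT_HSTSWRITEFUNCTION, hsts_writer);
       curl_easy_setopt(h, CURLOPT_HSTSWRITEDATA, fp); */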
@@ -440,7 +440,7 @@ static CURLcode hsts_add(struct hsts *h, char *line)
if(!e)
result = hsts_create(h, p, subdomain, expires);
else {
- /* the same host name, use the largest expire time */
+ /* the same hostname, use the largest expire time */
if(expires > e->expires)
e->expires = expires;
}
@@ -508,7 +508,7 @@ static CURLcode hsts_load(struct hsts *h, const char *file)
CURLcode result = CURLE_OK;
FILE *fp;
- /* we need a private copy of the file name so that the hsts cache file
+ /* we need a private copy of the filename so that the hsts cache file
name survives an easy handle reset */
free(h->filename);
h->filename = strdup(file);
diff --git a/libs/libcurl/src/hsts.h b/libs/libcurl/src/hsts.h
index 1d6f3d8d12..098b93c3a2 100644
--- a/libs/libcurl/src/hsts.h
+++ b/libs/libcurl/src/hsts.h
@@ -29,7 +29,7 @@
#include <curl/curl.h>
#include "llist.h"
-#ifdef DEBUGBUILD
+#if defined(DEBUGBUILD) || defined(UNITTESTS)
extern time_t deltatime;
#endif
@@ -40,7 +40,7 @@ struct stsentry {
curl_off_t expires; /* the timestamp of this entry's expiry */
};
-/* The HSTS cache. Needs to be able to tailmatch host names. */
+/* The HSTS cache. Needs to be able to tailmatch hostnames. */
struct hsts {
struct Curl_llist list;
char *filename;
diff --git a/libs/libcurl/src/http.c b/libs/libcurl/src/http.c
index e56550a09b..3163bd7e9f 100644
--- a/libs/libcurl/src/http.c
+++ b/libs/libcurl/src/http.c
@@ -169,14 +169,6 @@ CURLcode Curl_http_setup_conn(struct Curl_easy *data,
{
/* allocate the HTTP-specific struct for the Curl_easy, only to survive
during this request */
- struct HTTP *http;
- DEBUGASSERT(data->req.p.http == NULL);
-
- http = calloc(1, sizeof(struct HTTP));
- if(!http)
- return CURLE_OUT_OF_MEMORY;
-
- data->req.p.http = http;
connkeep(conn, "HTTP default");
if(data->state.httpwant == CURL_HTTP_VERSION_3ONLY) {
@@ -418,9 +410,9 @@ static CURLcode http_perhapsrewind(struct Curl_easy *data,
curl_off_t upload_remain = (expectsend >= 0)? (expectsend - bytessent) : -1;
bool little_upload_remains = (upload_remain >= 0 && upload_remain < 2000);
bool needs_rewind = Curl_creader_needs_rewind(data);
- /* By default, we'd like to abort the transfer when little or
- * unknown amount remains. But this may be overridden by authentications
- * further below! */
+ /* By default, we would like to abort the transfer when little or unknown
+ * amount remains. This may be overridden by authentications further
+ * below! */
bool abort_upload = (!data->req.upload_done && !little_upload_remains);
const char *ongoing_auth = NULL;
@@ -483,7 +475,7 @@ static CURLcode http_perhapsrewind(struct Curl_easy *data,
/* We decided to abort the ongoing transfer */
streamclose(conn, "Mid-auth HTTP and much data left to send");
/* FIXME: questionable manipulation here, can we do this differently? */
- data->req.size = 0; /* don't download any more than 0 bytes */
+ data->req.size = 0; /* do not download any more than 0 bytes */
}
return CURLE_OK;
}
@@ -556,7 +548,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data)
/* no (known) authentication available,
authentication is not "done" yet and
no authentication seems to be required and
- we didn't try HEAD or GET */
+ we did not try HEAD or GET */
if((data->state.httpreq != HTTPREQ_GET) &&
(data->state.httpreq != HTTPREQ_HEAD)) {
data->req.newurl = strdup(data->state.url); /* clone URL */
@@ -746,13 +738,13 @@ Curl_http_output_auth(struct Curl_easy *data,
if(authhost->want && !authhost->picked)
/* The app has selected one or more methods, but none has been picked
so far by a server round-trip. Then we set the picked one to the
- want one, and if this is one single bit it'll be used instantly. */
+ want one, and if this is one single bit it will be used instantly. */
authhost->picked = authhost->want;
if(authproxy->want && !authproxy->picked)
/* The app has selected one or more methods, but none has been picked so
far by a proxy round-trip. Then we set the picked one to the want one,
- and if this is one single bit it'll be used instantly. */
+ and if this is one single bit it will be used instantly. */
authproxy->picked = authproxy->want;
#ifndef CURL_DISABLE_PROXY
@@ -767,7 +759,7 @@ Curl_http_output_auth(struct Curl_easy *data,
#else
(void)proxytunnel;
#endif /* CURL_DISABLE_PROXY */
- /* we have no proxy so let's pretend we're done authenticating
+ /* we have no proxy so let's pretend we are done authenticating
with it */
authproxy->done = TRUE;
@@ -941,7 +933,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_DIGEST;
/* We call this function on input Digest headers even if Digest
- * authentication isn't activated yet, as we need to store the
+ * authentication is not activated yet, as we need to store the
* incoming data from this header in case we are going to use
* Digest */
result = Curl_input_digest(data, proxy, auth);
@@ -960,7 +952,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_BASIC;
if(authp->picked == CURLAUTH_BASIC) {
/* We asked for Basic authentication but got a 40X back
- anyway, which basically means our name+password isn't
+ anyway, which basically means our name+password is not
valid. */
authp->avail = CURLAUTH_NONE;
infof(data, "Authentication problem. Ignoring this.");
@@ -976,7 +968,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
authp->avail |= CURLAUTH_BEARER;
if(authp->picked == CURLAUTH_BEARER) {
/* We asked for Bearer authentication but got a 40X back
- anyway, which basically means our token isn't valid. */
+ anyway, which basically means our token is not valid. */
authp->avail = CURLAUTH_NONE;
infof(data, "Authentication problem. Ignoring this.");
data->state.authproblem = TRUE;
@@ -996,7 +988,7 @@ CURLcode Curl_http_input_auth(struct Curl_easy *data, bool proxy,
/* there may be multiple methods on one line, so keep reading */
while(*auth && *auth != ',') /* read up to the next comma */
auth++;
- if(*auth == ',') /* if we're on a comma, skip it */
+ if(*auth == ',') /* if we are on a comma, skip it */
auth++;
while(*auth && ISSPACE(*auth))
auth++;
@@ -1019,8 +1011,8 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
DEBUGASSERT(data->conn);
/*
- ** If we haven't been asked to fail on error,
- ** don't fail.
+ ** If we have not been asked to fail on error,
+ ** do not fail.
*/
if(!data->set.http_fail_on_error)
return FALSE;
@@ -1040,7 +1032,7 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
return FALSE;
/*
- ** Any code >= 400 that's not 401 or 407 is always
+ ** Any code >= 400 that is not 401 or 407 is always
** a terminal error
*/
if((httpcode != 401) && (httpcode != 407))
@@ -1052,22 +1044,19 @@ static bool http_should_fail(struct Curl_easy *data, int httpcode)
DEBUGASSERT((httpcode == 401) || (httpcode == 407));
/*
- ** Examine the current authentication state to see if this
- ** is an error. The idea is for this function to get
- ** called after processing all the headers in a response
- ** message. So, if we've been to asked to authenticate a
- ** particular stage, and we've done it, we're OK. But, if
- ** we're already completely authenticated, it's not OK to
- ** get another 401 or 407.
+ ** Examine the current authentication state to see if this is an error. The
+ ** idea is for this function to get called after processing all the headers
+  ** in a response message. So, if we have been asked to authenticate a
+ ** particular stage, and we have done it, we are OK. If we are already
+ ** completely authenticated, it is not OK to get another 401 or 407.
**
- ** It is possible for authentication to go stale such that
- ** the client needs to reauthenticate. Once that info is
- ** available, use it here.
+ ** It is possible for authentication to go stale such that the client needs
+ ** to reauthenticate. Once that info is available, use it here.
*/
/*
- ** Either we're not authenticating, or we're supposed to
- ** be authenticating something else. This is an error.
+ ** Either we are not authenticating, or we are supposed to be authenticating
+ ** something else. This is an error.
*/
if((httpcode == 401) && !data->state.aptr.user)
return TRUE;
@@ -1106,7 +1095,7 @@ Curl_compareheader(const char *headerline, /* line to check */
DEBUGASSERT(content);
if(!strncasecompare(headerline, header, hlen))
- return FALSE; /* doesn't start with header */
+ return FALSE; /* does not start with header */
/* pass the header */
start = &headerline[hlen];
@@ -1118,11 +1107,11 @@ Curl_compareheader(const char *headerline, /* line to check */
/* find the end of the header line */
end = strchr(start, '\r'); /* lines end with CRLF */
if(!end) {
- /* in case there's a non-standard compliant line here */
+ /* in case there is a non-standard compliant line here */
end = strchr(start, '\n');
if(!end)
- /* hm, there's no line ending here, use the zero byte! */
+ /* hm, there is no line ending here, use the zero byte! */
end = strchr(start, '\0');
}
@@ -1153,7 +1142,7 @@ CURLcode Curl_http_connect(struct Curl_easy *data, bool *done)
}
/* this returns the socket to wait for in the DO and DOING state for the multi
- interface and then we're always _sending_ a request and thus we wait for
+ interface and then we are always _sending_ a request and thus we wait for
the single socket to become writable only */
int Curl_http_getsock_do(struct Curl_easy *data,
struct connectdata *conn,
@@ -1174,16 +1163,12 @@ CURLcode Curl_http_done(struct Curl_easy *data,
CURLcode status, bool premature)
{
struct connectdata *conn = data->conn;
- struct HTTP *http = data->req.p.http;
- /* Clear multipass flag. If authentication isn't done yet, then it will get
+ /* Clear multipass flag. If authentication is not done yet, then it will get
* a chance to be set back to true when we output the next auth header */
data->state.authhost.multipass = FALSE;
data->state.authproxy.multipass = FALSE;
- if(!http)
- return CURLE_OK;
-
Curl_dyn_reset(&data->state.headerb);
Curl_hyper_done(data);
@@ -1197,8 +1182,8 @@ CURLcode Curl_http_done(struct Curl_easy *data,
(data->req.bytecount +
data->req.headerbytecount -
data->req.deductheadercount) <= 0) {
- /* If this connection isn't simply closed to be retried, AND nothing was
- read from the HTTP server (that counts), this can't be right so we
+ /* If this connection is not simply closed to be retried, AND nothing was
+ read from the HTTP server (that counts), this cannot be right so we
return an error here */
failf(data, "Empty reply from server");
/* Mark it as closed to avoid the "left intact" message */
@@ -1357,7 +1342,7 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
DEBUGASSERT(name && value);
if(data->state.aptr.host &&
- /* a Host: header was sent already, don't pass on any custom Host:
+ /* a Host: header was sent already, do not pass on any custom Host:
header as that will produce *two* in the same request! */
hd_name_eq(name, namelen, STRCONST("Host:")))
;
@@ -1370,18 +1355,18 @@ CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
hd_name_eq(name, namelen, STRCONST("Content-Type:")))
;
else if(data->req.authneg &&
- /* while doing auth neg, don't allow the custom length since
+ /* while doing auth neg, do not allow the custom length since
we will force length zero then */
hd_name_eq(name, namelen, STRCONST("Content-Length:")))
;
else if(data->state.aptr.te &&
- /* when asking for Transfer-Encoding, don't pass on a custom
+ /* when asking for Transfer-Encoding, do not pass on a custom
Connection: */
hd_name_eq(name, namelen, STRCONST("Connection:")))
;
else if((conn->httpversion >= 20) &&
hd_name_eq(name, namelen, STRCONST("Transfer-Encoding:")))
- /* HTTP/2 doesn't support chunked requests */
+ /* HTTP/2 does not support chunked requests */
;
else if((hd_name_eq(name, namelen, STRCONST("Authorization:")) ||
hd_name_eq(name, namelen, STRCONST("Cookie:"))) &&
@@ -1503,8 +1488,9 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
char *compare = semicolonp ? semicolonp : headers->data;
if(data->state.aptr.host &&
- /* a Host: header was sent already, don't pass on any custom Host:
- header as that will produce *two* in the same request! */
+ /* a Host: header was sent already, do not pass on any custom
+ Host: header as that will produce *two* in the same
+ request! */
checkprefix("Host:", compare))
;
else if(data->state.httpreq == HTTPREQ_POST_FORM &&
@@ -1516,18 +1502,18 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
checkprefix("Content-Type:", compare))
;
else if(data->req.authneg &&
- /* while doing auth neg, don't allow the custom length since
+ /* while doing auth neg, do not allow the custom length since
we will force length zero then */
checkprefix("Content-Length:", compare))
;
else if(data->state.aptr.te &&
- /* when asking for Transfer-Encoding, don't pass on a custom
+ /* when asking for Transfer-Encoding, do not pass on a custom
Connection: */
checkprefix("Connection:", compare))
;
else if((conn->httpversion >= 20) &&
checkprefix("Transfer-Encoding:", compare))
- /* HTTP/2 doesn't support chunked requests */
+ /* HTTP/2 does not support chunked requests */
;
else if((checkprefix("Authorization:", compare) ||
checkprefix("Cookie:", compare)) &&
@@ -1719,10 +1705,10 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
if(ptr && (!data->state.this_is_a_follow ||
strcasecompare(data->state.first_host, conn->host.name))) {
#if !defined(CURL_DISABLE_COOKIES)
- /* If we have a given custom Host: header, we extract the host name in
+ /* If we have a given custom Host: header, we extract the hostname in
order to possibly use it for cookie reasons later on. We only allow the
custom Host: header if this is NOT a redirect, as setting Host: in the
- redirected request is being out on thin ice. Except if the host name
+ redirected request is being out on thin ice. Except if the hostname
is the same as the first one! */
char *cookiehost = Curl_copy_header_value(ptr);
if(!cookiehost)
@@ -1760,15 +1746,15 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
}
}
else {
- /* When building Host: headers, we must put the host name within
- [brackets] if the host name is a plain IPv6-address. RFC2732-style. */
+ /* When building Host: headers, we must put the hostname within
+ [brackets] if the hostname is a plain IPv6-address. RFC2732-style. */
const char *host = conn->host.name;
if(((conn->given->protocol&(CURLPROTO_HTTPS|CURLPROTO_WSS)) &&
(conn->remote_port == PORT_HTTPS)) ||
((conn->given->protocol&(CURLPROTO_HTTP|CURLPROTO_WS)) &&
(conn->remote_port == PORT_HTTP)) )
- /* if(HTTPS on port 443) OR (HTTP on port 80) then don't include
+ /* if(HTTPS on port 443) OR (HTTP on port 80) then do not include
the port number in the host string */
aptr->host = aprintf("Host: %s%s%s\r\n", conn->bits.ipv6_ip?"[":"",
host, conn->bits.ipv6_ip?"]":"");
@@ -1778,7 +1764,7 @@ CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
conn->remote_port);
if(!aptr->host)
- /* without Host: we can't make a nice request */
+ /* without Host: we cannot make a nice request */
return CURLE_OUT_OF_MEMORY;
}
return CURLE_OK;
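The Host: construction above brackets plain IPv6 literals and drops the port when it matches the scheme default. The same rule as a standalone sketch, with hypothetical names; not curl's code:

    #include <stdio.h>
    #include <stdbool.h>

    /* RFC 2732-style Host: header; default ports are omitted. */
    static int format_host_header(char *buf, size_t len, const char *host,
                                  bool ipv6_literal, int port, int default_port)
    {
      const char *lb = ipv6_literal ? "[" : "";
      const char *rb = ipv6_literal ? "]" : "";
      if(port == default_port)
        return snprintf(buf, len, "Host: %s%s%s\r\n", lb, host, rb);
      return snprintf(buf, len, "Host: %s%s%s:%d\r\n", lb, host, rb, port);
    }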
@@ -1806,7 +1792,7 @@ CURLcode Curl_http_target(struct Curl_easy *data,
/* The path sent to the proxy is in fact the entire URL. But if the remote
host is a IDN-name, we must make sure that the request we produce only
- uses the encoded host name! */
+ uses the encoded hostname! */
/* and no fragment part */
CURLUcode uc;
@@ -1829,7 +1815,7 @@ CURLcode Curl_http_target(struct Curl_easy *data,
}
if(strcasecompare("http", data->state.up.scheme)) {
- /* when getting HTTP, we don't want the userinfo the URL */
+    /* when getting HTTP, we do not want the userinfo in the URL */
uc = curl_url_set(h, CURLUPART_USER, NULL, 0);
if(uc) {
curl_url_cleanup(h);
@@ -1850,7 +1836,7 @@ CURLcode Curl_http_target(struct Curl_easy *data,
curl_url_cleanup(h);
- /* target or url */
+ /* target or URL */
result = Curl_dyn_add(r, data->set.str[STRING_TARGET]?
data->set.str[STRING_TARGET]:url);
free(url);
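For a proxied request, the code above runs the full URL through the URL API and strips the parts that must not reach the wire (userinfo for plain http, the fragment). The same scrubbing can be reproduced with the public curl_url API; a small sketch where the helper name and sample URL are illustrative:

    #include <curl/curl.h>

    /* Return a copy of 'url' with userinfo and fragment removed
       (caller curl_free()s it), or NULL on failure. */
    static char *scrub_url(const char *url)
    {
      CURLU *u = curl_url();
      char *clean = NULL;
      if(u &&
         !curl_url_set(u, CURLUPART_URL, url, 0) &&
         !curl_url_set(u, CURLUPART_USER, NULL, 0) &&
         !curl_url_set(u, CURLUPART_PASSWORD, NULL, 0) &&
         !curl_url_set(u, CURLUPART_FRAGMENT, NULL, 0))
        curl_url_get(u, CURLUPART_URL, &clean, 0);
      curl_url_cleanup(u);
      return clean; /* e.g. "http://u:p@host/x#f" -> "http://host/x" */
    }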
@@ -2053,7 +2039,7 @@ static CURLcode http_resume(struct Curl_easy *data, Curl_HttpReq httpreq)
if(data->state.resume_from < 0) {
/*
* This is meant to get the size of the present remote-file by itself.
- * We don't support this now. Bail out!
+ * We do not support this now. Bail out!
*/
data->state.resume_from = 0;
}
@@ -2138,7 +2124,7 @@ static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r,
if(data->req.upgr101 != UPGR101_INIT)
return CURLE_OK;
- /* For really small puts we don't use Expect: headers at all, and for
+ /* For really small puts we do not use Expect: headers at all, and for
the somewhat bigger ones we allow the app to disable it. Just make
sure that the expect100header is always set to the preferred value
here. */
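addexpect() skips "Expect: 100-continue" for small uploads and lets the application veto it for the rest. From the application side the veto is simply an empty custom header; for example, assuming an easy handle h:

    /* Suppress the automatic "Expect: 100-continue" for an upload. */
    struct curl_slist *hdrs = curl_slist_append(NULL, "Expect:");
    curl_easy_setopt(h, CURLOPT_HTTPHEADER, hdrs);
    /* ... perform the PUT/POST ... */
    curl_slist_free_all(hdrs);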
@@ -2190,7 +2176,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
case HTTPREQ_POST_MIME:
#endif
/* We only set Content-Length and allow a custom Content-Length if
- we don't upload data chunked, as RFC2616 forbids us to set both
+ we do not upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length).
We do not override a custom "Content-Length" header, but during
authentication negotiation that header is suppressed.
@@ -2199,7 +2185,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
(data->req.authneg ||
!Curl_checkheaders(data, STRCONST("Content-Length")))) {
/* we allow replacing this header if not during auth negotiation,
- although it isn't very wise to actually set your own */
+ although it is not very wise to actually set your own */
result = Curl_dyn_addf(r,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
"\r\n", req_clen);
@@ -2247,7 +2233,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
out:
if(!result) {
/* setup variables for the upcoming transfer */
- Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SENDRECV, -1, TRUE);
}
return result;
}
@@ -2335,7 +2321,7 @@ CURLcode Curl_http_range(struct Curl_easy *data,
{
if(data->state.use_range) {
/*
- * A range is selected. We use different headers whether we're downloading
+ * A range is selected. We use different headers whether we are downloading
* or uploading and we always let customized headers override our internal
* ones if any such are specified.
*/
@@ -2353,7 +2339,7 @@ CURLcode Curl_http_range(struct Curl_easy *data,
free(data->state.aptr.rangeline);
if(data->set.set_resume_from < 0) {
- /* Upload resume was asked for, but we don't know the size of the
+ /* Upload resume was asked for, but we do not know the size of the
remote part so we tell the server (and act accordingly) that we
upload the whole file (again) */
data->state.aptr.rangeline =
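Curl_http_range() turns the range and resume state into the Range: request header (and, for an upload resume of unknown size, re-sends the whole file). The corresponding public options, with example values and an assumed easy handle h:

    /* Download only bytes 0-1023 ... */
    curl_easy_setopt(h, CURLOPT_RANGE, "0-1023");
    /* ... or resume a download at offset 4096. */
    curl_easy_setopt(h, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)4096);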
@@ -2397,12 +2383,12 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(data->req.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
- and we're set to close anyway. */
+ and we are set to close anyway. */
k->keepon &= ~KEEP_RECV;
k->done = TRUE;
return CURLE_OK;
}
- /* We have a new url to load, but since we want to be able to reuse this
+ /* We have a new URL to load, but since we want to be able to reuse this
connection properly, we read the full response in "ignore more" */
k->ignorebody = TRUE;
infof(data, "Ignoring the response-body");
@@ -2413,7 +2399,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(k->size == data->state.resume_from) {
/* The resume point is at the end of file, consider this fine even if it
- doesn't allow resume from here. */
+ does not allow resume from here. */
infof(data, "The entire document is already downloaded");
streamclose(conn, "already downloaded");
/* Abort download */
@@ -2422,10 +2408,10 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
return CURLE_OK;
}
- /* we wanted to resume a download, although the server doesn't seem to
- * support this and we did this with a GET (if it wasn't a GET we did a
+ /* we wanted to resume a download, although the server does not seem to
+ * support this and we did this with a GET (if it was not a GET we did a
* POST or PUT resume) */
- failf(data, "HTTP server doesn't seem to support "
+ failf(data, "HTTP server does not seem to support "
"byte ranges. Cannot resume.");
return CURLE_RANGE_ERROR;
}
@@ -2437,7 +2423,7 @@ CURLcode Curl_http_firstwrite(struct Curl_easy *data)
if(!Curl_meets_timecondition(data, k->timeofdoc)) {
k->done = TRUE;
- /* We're simulating an HTTP 304 from server so we return
+ /* We are simulating an HTTP 304 from server so we return
what should have been returned from the server */
data->info.httpcode = 304;
infof(data, "Simulate an HTTP 304 response");
@@ -2459,7 +2445,7 @@ CURLcode Curl_transferencode(struct Curl_easy *data)
/* When we are to insert a TE: header in the request, we must also insert
TE in a Connection: header, so we need to merge the custom provided
Connection: header and prevent the original to get sent. Note that if
- the user has inserted his/her own TE: header we don't do this magic
+ the user has inserted his/her own TE: header we do not do this magic
but then assume that the user will handle it all! */
char *cptr = Curl_checkheaders(data, STRCONST("Connection"));
#define TE_HEADER "TE: gzip\r\n"
@@ -2705,7 +2691,7 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
if(!(conn->handler->flags&PROTOPT_SSL) &&
conn->httpversion < 20 &&
(data->state.httpwant == CURL_HTTP_VERSION_2)) {
- /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
+ /* append HTTP2 upgrade magic stuff to the HTTP request if it is not done
over SSL */
result = Curl_http2_request_upgrade(&req, data);
if(result) {
@@ -2849,7 +2835,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
#ifndef CURL_DISABLE_ALTSVC
v = (data->asi &&
((data->conn->handler->flags & PROTOPT_SSL) ||
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
/* allow debug builds to circumvent the HTTPS restriction */
getenv("CURL_ALTSVC_HTTP")
#else
@@ -2901,7 +2887,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
* Process Content-Encoding. Look for the values: identity,
* gzip, deflate, compress, x-gzip and x-compress. x-gzip and
* x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress. However, errors are
+ * 2616). zlib cannot handle compress. However, errors are
* handled further down when the response body is processed
*/
return Curl_build_unencoding_stack(data, v, FALSE);
@@ -2936,7 +2922,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
/*
* An HTTP/1.0 reply with the 'Connection: keep-alive' line
* tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
+ * pleasure. Default action for 1.0 is to close.
*
* [RFC2068, section 19.7.1] */
connkeep(conn, "Connection keep-alive");
@@ -3029,13 +3015,13 @@ CURLcode Curl_http_header(struct Curl_easy *data,
* connection will be kept alive for our pleasure.
* Default action for 1.0 is to close.
*/
- connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
+ connkeep(conn, "Proxy-Connection keep-alive"); /* do not close */
infof(data, "HTTP/1.0 proxy connection set to keep alive");
}
else if((conn->httpversion == 11) && conn->bits.httpproxy &&
HD_IS_AND_SAYS(hd, hdlen, "Proxy-Connection:", "close")) {
/*
- * We get an HTTP/1.1 response from a proxy and it says it'll
+ * We get an HTTP/1.1 response from a proxy and it says it will
* close down after this transfer.
*/
connclose(conn, "Proxy-Connection: asked to close after done");
@@ -3095,7 +3081,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
HD_VAL(hd, hdlen, "Set-Cookie:") : NULL;
if(v) {
/* If there is a custom-set Host: name, use it here, or else use
- * real peer host name. */
+ * real peer hostname. */
const char *host = data->state.aptr.cookiehost?
data->state.aptr.cookiehost:conn->host.name;
const bool secure_context =
@@ -3116,7 +3102,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
/* If enabled, the header is incoming and this is over HTTPS */
v = (data->hsts &&
((conn->handler->flags & PROTOPT_SSL) ||
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
/* allow debug builds to circumvent the HTTPS restriction */
getenv("CURL_HSTS_HTTP")
#else
@@ -3160,7 +3146,7 @@ CURLcode Curl_http_header(struct Curl_easy *data,
if(result)
return result;
if(!k->chunk && data->set.http_transfer_encoding) {
- /* if this isn't chunked, only close can signal the end of this
+ /* if this is not chunked, only close can signal the end of this
* transfer as Content-Length is said not to be trusted for
* transfer-encoding! */
connclose(conn, "HTTP/1.1 transfer-encoding without chunks");
@@ -3231,11 +3217,11 @@ CURLcode Curl_http_statusline(struct Curl_easy *data,
data->state.httpversion = (unsigned char)k->httpversion;
/*
- * This code executes as part of processing the header. As a
- * result, it's not totally clear how to interpret the
+ * This code executes as part of processing the header. As a
+ * result, it is not totally clear how to interpret the
* response code yet as that depends on what other headers may
- * be present. 401 and 407 may be errors, but may be OK
- * depending on how authentication is working. Other codes
+ * be present. 401 and 407 may be errors, but may be OK
+ * depending on how authentication is working. Other codes
* are definitely errors, so give up here.
*/
if(data->state.resume_from && data->state.httpreq == HTTPREQ_GET &&
@@ -3287,7 +3273,7 @@ CURLcode Curl_http_statusline(struct Curl_easy *data,
}
/* Content-Length must be ignored if any Transfer-Encoding is present in the
- response. Refer to RFC 7230 section 3.3.3 and RFC2616 section 4.4. This is
+ response. Refer to RFC 7230 section 3.3.3 and RFC2616 section 4.4. This is
figured out here after all headers have been received but before the final
call to the user's header callback, so that a valid content length can be
retrieved by the user in the final call. */
@@ -3323,7 +3309,7 @@ static CURLcode verify_header(struct Curl_easy *data,
/* the first "header" is the status-line and it has no colon */
return CURLE_OK;
if(((hd[0] == ' ') || (hd[0] == '\t')) && k->headerline > 2)
- /* line folding, can't happen on line 2 */
+ /* line folding, cannot happen on line 2 */
;
else {
ptr = memchr(hd, ':', hdlen);
@@ -3363,8 +3349,35 @@ CURLcode Curl_bump_headersize(struct Curl_easy *data,
return CURLE_OK;
}
+static CURLcode http_write_header(struct Curl_easy *data,
+ const char *hd, size_t hdlen)
+{
+ CURLcode result;
+ int writetype;
+
+ /* now, only output this if the header AND body are requested:
+ */
+ Curl_debug(data, CURLINFO_HEADER_IN, (char *)hd, hdlen);
+
+ writetype = CLIENTWRITE_HEADER |
+ ((data->req.httpcode/100 == 1) ? CLIENTWRITE_1XX : 0);
+
+ result = Curl_client_write(data, writetype, hd, hdlen);
+ if(result)
+ return result;
+
+ result = Curl_bump_headersize(data, hdlen, FALSE);
+ if(result)
+ return result;
+
+ data->req.deductheadercount = (100 <= data->req.httpcode &&
+ 199 >= data->req.httpcode)?
+ data->req.headerbytecount:0;
+ return result;
+}
static CURLcode http_on_response(struct Curl_easy *data,
+ const char *last_hd, size_t last_hd_len,
const char *buf, size_t blen,
size_t *pconsumed)
{
@@ -3384,9 +3397,20 @@ static CURLcode http_on_response(struct Curl_easy *data,
conn->bundle->multiuse = BUNDLE_NO_MULTIUSE;
}
+ if(k->httpcode < 200 && last_hd) {
+ /* Intermediate responses might trigger processing of more
+ * responses, write the last header to the client before
+ * proceeding. */
+ result = http_write_header(data, last_hd, last_hd_len);
+ last_hd = NULL; /* handled it */
+ if(result)
+ goto out;
+ }
+
if(k->httpcode < 100) {
failf(data, "Unsupported response code in HTTP response");
- return CURLE_UNSUPPORTED_PROTOCOL;
+ result = CURLE_UNSUPPORTED_PROTOCOL;
+ goto out;
}
else if(k->httpcode < 200) {
/* "A user agent MAY ignore unexpected 1xx status responses."
@@ -3405,10 +3429,12 @@ static CURLcode http_on_response(struct Curl_easy *data,
break;
case 101:
/* Switching Protocols only allowed from HTTP/1.1 */
+
if(conn->httpversion != 11) {
/* invalid for other HTTP versions */
failf(data, "unexpected 101 response code");
- return CURLE_WEIRD_SERVER_REPLY;
+ result = CURLE_WEIRD_SERVER_REPLY;
+ goto out;
}
if(k->upgr101 == UPGR101_H2) {
/* Switching to HTTP/2, where we will get more responses */
@@ -3421,7 +3447,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
* be processed. */
result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
if(result)
- return result;
+ goto out;
*pconsumed += blen;
}
#ifdef USE_WEBSOCKETS
@@ -3430,7 +3456,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
* WebSockets format and taken in by the protocol handler. */
result = Curl_ws_accept(data, buf, blen);
if(result)
- return result;
+ goto out;
*pconsumed += blen; /* ws accept handled the data */
k->header = FALSE; /* we will not get more responses */
if(data->set.connect_only)
@@ -3451,7 +3477,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
* to receive a final response eventually. */
break;
}
- return result;
+ goto out;
}
/* k->httpcode >= 200, final response */
@@ -3512,7 +3538,8 @@ static CURLcode http_on_response(struct Curl_easy *data,
/* All >=200 HTTP status codes are errors when wanting websockets */
if(data->req.upgr101 == UPGR101_WS) {
failf(data, "Refused WebSockets upgrade: %d", k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
+ result = CURLE_HTTP_RETURNED_ERROR;
+ goto out;
}
#endif
@@ -3520,7 +3547,8 @@ static CURLcode http_on_response(struct Curl_easy *data,
if(http_should_fail(data, data->req.httpcode)) {
failf(data, "The requested URL returned error: %d",
k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
+ result = CURLE_HTTP_RETURNED_ERROR;
+ goto out;
}
/* Curl_http_auth_act() checks what authentication methods
@@ -3528,7 +3556,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
* use. It will set 'newurl' if an auth method was picked. */
result = Curl_http_auth_act(data);
if(result)
- return result;
+ goto out;
if(k->httpcode >= 300) {
if((!data->req.authneg) && !conn->bits.close &&
@@ -3551,7 +3579,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
case HTTPREQ_POST_MIME:
/* We got an error response. If this happened before the whole
* request body has been sent we stop sending and mark the
- * connection for closure after we've read the entire response.
+ * connection for closure after we have read the entire response.
*/
if(!Curl_req_done_sending(data)) {
if((k->httpcode == 417) && Curl_http_exp100_is_selected(data)) {
@@ -3566,7 +3594,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
"Stop sending data before everything sent");
result = http_perhapsrewind(data, conn);
if(result)
- return result;
+ goto out;
}
data->state.disableexpect = TRUE;
DEBUGASSERT(!data->req.newurl);
@@ -3582,7 +3610,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
streamclose(conn, "Stop sending data before everything sent");
result = Curl_req_abort_sending(data);
if(result)
- return result;
+ goto out;
}
}
break;
@@ -3605,7 +3633,7 @@ static CURLcode http_on_response(struct Curl_easy *data,
*/
result = Curl_http_size(data);
if(result)
- return result;
+ goto out;
/* If we requested a "no body", this is a good time to get
* out and return home.
@@ -3614,9 +3642,9 @@ static CURLcode http_on_response(struct Curl_easy *data,
k->download_done = TRUE;
/* If max download size is *zero* (nothing) we already have
- nothing and can safely return ok now! But for HTTP/2, we'd
+ nothing and can safely return ok now! But for HTTP/2, we would
like to call http2_handle_stream_close to properly close a
- stream. In order to do this, we keep reading until we
+ stream. In order to do this, we keep reading until we
close the stream. */
if(0 == k->maxdownload
&& !Curl_conn_is_http2(data, conn, FIRSTSOCKET)
@@ -3624,7 +3652,16 @@ static CURLcode http_on_response(struct Curl_easy *data,
k->download_done = TRUE;
/* final response without error, prepare to receive the body */
- return Curl_http_firstwrite(data);
+ result = Curl_http_firstwrite(data);
+
+out:
+ if(last_hd) {
+ /* if not written yet, write it now */
+ CURLcode r2 = http_write_header(data, last_hd, last_hd_len);
+ if(!result)
+ result = r2;
+ }
+ return result;
}
static CURLcode http_rw_hd(struct Curl_easy *data,
@@ -3639,36 +3676,25 @@ static CURLcode http_rw_hd(struct Curl_easy *data,
*pconsumed = 0;
if((0x0a == *hd) || (0x0d == *hd)) {
/* Empty header line means end of headers! */
+ struct dynbuf last_header;
size_t consumed;
- /* now, only output this if the header AND body are requested:
- */
- Curl_debug(data, CURLINFO_HEADER_IN, (char *)hd, hdlen);
-
- writetype = CLIENTWRITE_HEADER |
- ((k->httpcode/100 == 1) ? CLIENTWRITE_1XX : 0);
-
- result = Curl_client_write(data, writetype, hd, hdlen);
+ Curl_dyn_init(&last_header, hdlen + 1);
+ result = Curl_dyn_addn(&last_header, hd, hdlen);
if(result)
return result;
- result = Curl_bump_headersize(data, hdlen, FALSE);
- if(result)
- return result;
-
- data->req.deductheadercount =
- (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
-
/* analyze the response to find out what to do. */
/* Caveat: we clear anything in the header brigade, because a
* response might switch HTTP version which may call use recursively.
* Not nice, but that is currently the way of things. */
Curl_dyn_reset(&data->state.headerb);
- result = http_on_response(data, buf_remain, blen, &consumed);
- if(result)
- return result;
+ result = http_on_response(data, Curl_dyn_ptr(&last_header),
+ Curl_dyn_len(&last_header),
+ buf_remain, blen, &consumed);
*pconsumed += consumed;
- return CURLE_OK;
+ Curl_dyn_free(&last_header);
+ return result;
}
/*
@@ -3681,14 +3707,14 @@ static CURLcode http_rw_hd(struct Curl_easy *data,
or else we consider this to be the body right away! */
bool fine_statusline = FALSE;
- k->httpversion = 0; /* Don't know yet */
+ k->httpversion = 0; /* Do not know yet */
if(data->conn->handler->protocol & PROTO_FAMILY_HTTP) {
/*
* https://datatracker.ietf.org/doc/html/rfc7230#section-3.1.2
*
* The response code is always a three-digit number in HTTP as the spec
* says. We allow any three-digit number here, but we cannot make
- * guarantees on future behaviors since it isn't within the protocol.
+ * guarantees on future behaviors since it is not within the protocol.
*/
const char *p = hd;
@@ -4431,7 +4457,7 @@ static CURLcode cr_exp100_read(struct Curl_easy *data,
*eos = FALSE;
return CURLE_OK;
}
- /* we've waited long enough, continue anyway */
+ /* we have waited long enough, continue anyway */
http_exp100_continue(data, reader);
infof(data, "Done waiting for 100-continue");
FALLTHROUGH();
@@ -4460,6 +4486,7 @@ static const struct Curl_crtype cr_exp100 = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
cr_exp100_done,
sizeof(struct cr_exp100_ctx)
};
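The cr_exp100 reader above waits a bounded time for the server's "100 Continue" before pushing the body anyway; the new Curl_creader_def_is_paused entry only fills the enlarged reader vtable. The wait itself is tunable from the application, for example (easy handle h assumed):

    /* Wait at most 500 ms for "100 Continue" before uploading anyway. */
    curl_easy_setopt(h, CURLOPT_EXPECT_100_TIMEOUT_MS, 500L);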
diff --git a/libs/libcurl/src/http.h b/libs/libcurl/src/http.h
index c56758be23..677825ae73 100644
--- a/libs/libcurl/src/http.h
+++ b/libs/libcurl/src/http.h
@@ -73,7 +73,6 @@ char *Curl_checkProxyheaders(struct Curl_easy *data,
const struct connectdata *conn,
const char *thisheader,
const size_t thislen);
-struct HTTP; /* see below */
CURLcode Curl_add_timecondition(struct Curl_easy *data,
#ifndef USE_HYPER
@@ -147,7 +146,7 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data);
selected to use no auth at all. Ie, we actively select no auth, as opposed
to not having one selected. The other CURLAUTH_* defines are present in the
public curl/curl.h header. */
-#define CURLAUTH_PICKNONE (1<<30) /* don't use auth */
+#define CURLAUTH_PICKNONE (1<<30) /* do not use auth */
/* MAX_INITIAL_POST_SIZE indicates the number of bytes that will make the POST
data get included in the initial data chunk sent to the server. If the
@@ -187,10 +186,6 @@ void Curl_http_exp100_got100(struct Curl_easy *data);
/****************************************************************************
* HTTP unique setup
***************************************************************************/
-struct HTTP {
- /* TODO: no longer used, we should remove it from SingleRequest */
- char unused;
-};
CURLcode Curl_http_size(struct Curl_easy *data);
@@ -240,7 +235,7 @@ struct httpreq {
};
/**
- * Create a HTTP request struct.
+ * Create an HTTP request struct.
*/
CURLcode Curl_http_req_make(struct httpreq **preq,
const char *method, size_t m_len,
@@ -290,7 +285,7 @@ struct http_resp {
};
/**
- * Create a HTTP response struct.
+ * Create an HTTP response struct.
*/
CURLcode Curl_http_resp_make(struct http_resp **presp,
int status,
diff --git a/libs/libcurl/src/http1.c b/libs/libcurl/src/http1.c
index 3cef64f5c1..e9bd487fe6 100644
--- a/libs/libcurl/src/http1.c
+++ b/libs/libcurl/src/http1.c
@@ -217,7 +217,7 @@ static CURLcode start_req(struct h1_req_parser *parser,
tmp[target_len] = '\0';
/* See if treating TARGET as an absolute URL makes sense */
if(Curl_is_absolute_url(tmp, NULL, 0, FALSE)) {
- int url_options;
+ unsigned int url_options;
url = curl_url();
if(!url) {
diff --git a/libs/libcurl/src/http2.c b/libs/libcurl/src/http2.c
index 261c9fbb8a..108678a2b8 100644
--- a/libs/libcurl/src/http2.c
+++ b/libs/libcurl/src/http2.c
@@ -83,18 +83,17 @@
/* spare chunks we keep for a full window */
#define H2_STREAM_POOL_SPARES (H2_STREAM_WINDOW_SIZE / H2_CHUNK_SIZE)
-/* We need to accommodate the max number of streams with their window
- * sizes on the overall connection. Streams might become PAUSED which
- * will block their received QUOTA in the connection window. And if we
- * run out of space, the server is blocked from sending us any data.
- * See #10988 for an issue with this. */
+/* We need to accommodate the max number of streams with their window sizes on
+ * the overall connection. Streams might become PAUSED which will block their
+ * received QUOTA in the connection window. If we run out of space, the server
+ * is blocked from sending us any data. See #10988 for an issue with this. */
#define HTTP2_HUGE_WINDOW_SIZE (100 * H2_STREAM_WINDOW_SIZE)
#define H2_SETTINGS_IV_LEN 3
#define H2_BINSETTINGS_LEN 80
-static int populate_settings(nghttp2_settings_entry *iv,
- struct Curl_easy *data)
+static size_t populate_settings(nghttp2_settings_entry *iv,
+ struct Curl_easy *data)
{
iv[0].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
iv[0].value = Curl_multi_max_concurrent_streams(data->multi);
@@ -112,7 +111,7 @@ static ssize_t populate_binsettings(uint8_t *binsettings,
struct Curl_easy *data)
{
nghttp2_settings_entry iv[H2_SETTINGS_IV_LEN];
- int ivlen;
+ size_t ivlen;
ivlen = populate_settings(iv, data);
/* this returns number of bytes it wrote or a negative number on error. */
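populate_settings() now returns size_t because the entry count ends up in nghttp2 calls that take a size_t. A minimal sketch of that call shape, where h2 is an assumed existing nghttp2_session * and the values are illustrative:

    nghttp2_settings_entry iv[2];
    size_t niv = 0;
    int rv;
    iv[niv].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
    iv[niv++].value = 100;
    iv[niv].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH;
    iv[niv++].value = 0;
    /* nghttp2_submit_settings() takes the entry count as size_t */
    rv = nghttp2_submit_settings(h2, NGHTTP2_FLAG_NONE, iv, niv);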
@@ -133,10 +132,12 @@ struct cf_h2_ctx {
struct Curl_hash streams; /* hash of `data->id` to `h2_stream_ctx` */
size_t drain_total; /* sum of all stream's UrlState drain */
uint32_t max_concurrent_streams;
- int32_t goaway_error;
- int32_t last_stream_id;
+ uint32_t goaway_error; /* goaway error code from server */
+ int32_t remote_max_sid; /* max id processed by server */
+ int32_t local_max_sid; /* max id processed by us */
BIT(conn_closed);
- BIT(goaway);
+ BIT(rcvd_goaway);
+ BIT(sent_goaway);
BIT(enable_push);
BIT(nw_out_blocked);
};
@@ -289,10 +290,6 @@ static CURLcode http2_data_setup(struct Curl_cfilter *cf,
(void)cf;
DEBUGASSERT(data);
- if(!data->req.p.http) {
- failf(data, "initialization failure, transfer not http initialized");
- return CURLE_FAILED_INIT;
- }
stream = H2_STREAM_CTX(ctx, data);
if(stream) {
*pstream = stream;
@@ -437,7 +434,7 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf,
Curl_bufq_initp(&ctx->outbufq, &ctx->stream_bufcp, H2_NW_SEND_CHUNKS, 0);
Curl_dyn_init(&ctx->scratch, CURL_MAX_HTTP_HEADER);
Curl_hash_offt_init(&ctx->streams, 63, h2_stream_hash_free);
- ctx->last_stream_id = 2147483647;
+ ctx->remote_max_sid = 2147483647;
rc = nghttp2_session_callbacks_new(&cbs);
if(rc) {
@@ -486,7 +483,7 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf,
DEBUGASSERT(stream);
stream->id = 1;
/* queue SETTINGS frame (again) */
- rc = nghttp2_session_upgrade2(ctx->h2, binsettings, binlen,
+ rc = nghttp2_session_upgrade2(ctx->h2, binsettings, (size_t)binlen,
data->state.httpreq == HTTPREQ_HEAD,
NULL);
if(rc) {
@@ -507,7 +504,7 @@ static CURLcode cf_h2_ctx_init(struct Curl_cfilter *cf,
}
else {
nghttp2_settings_entry iv[H2_SETTINGS_IV_LEN];
- int ivlen;
+ size_t ivlen;
ivlen = populate_settings(iv, data);
rc = nghttp2_submit_settings(ctx->h2, NGHTTP2_FLAG_NONE,
@@ -612,8 +609,8 @@ static bool http2_connisalive(struct Curl_cfilter *cf, struct Curl_easy *data,
return FALSE;
if(*input_pending) {
- /* This happens before we've sent off a request and the connection is
- not in use by any other transfer, there shouldn't be any data here,
+ /* This happens before we have sent off a request and the connection is
+ not in use by any other transfer, there should not be any data here,
only "protocol frames" */
CURLcode result;
ssize_t nread = -1;
@@ -794,18 +791,9 @@ static struct Curl_easy *h2_duphandle(struct Curl_cfilter *cf,
{
struct Curl_easy *second = curl_easy_duphandle(data);
if(second) {
- /* setup the request struct */
- struct HTTP *http = calloc(1, sizeof(struct HTTP));
- if(!http) {
- (void)Curl_close(&second);
- }
- else {
- struct h2_stream_ctx *second_stream;
-
- second->req.p.http = http;
- http2_data_setup(cf, second, &second_stream);
- second->state.priority.weight = data->state.priority.weight;
- }
+ struct h2_stream_ctx *second_stream;
+ http2_data_setup(cf, second, &second_stream);
+ second->state.priority.weight = data->state.priority.weight;
}
return second;
}
@@ -867,9 +855,7 @@ fail:
static void discard_newhandle(struct Curl_cfilter *cf,
struct Curl_easy *newhandle)
{
- if(newhandle->req.p.http) {
- http2_data_done(cf, newhandle);
- }
+ http2_data_done(cf, newhandle);
(void)Curl_close(&newhandle);
}
@@ -967,6 +953,10 @@ static int push_promise(struct Curl_cfilter *cf,
rv = CURL_PUSH_DENY;
goto fail;
}
+
+ /* success, remember max stream id processed */
+ if(newstream->id > ctx->local_max_sid)
+ ctx->local_max_sid = newstream->id;
}
else {
CURL_TRC_CF(data, cf, "Got PUSH_PROMISE, ignore it");
@@ -1007,7 +997,7 @@ static void h2_xfer_write_resp(struct Curl_cfilter *cf,
"RST-ing stream",
stream->id, stream->xfer_result, blen);
nghttp2_submit_rst_stream(ctx->h2, 0, stream->id,
- NGHTTP2_ERR_CALLBACK_FAILURE);
+ (uint32_t)NGHTTP2_ERR_CALLBACK_FAILURE);
}
}
@@ -1048,7 +1038,7 @@ static CURLcode on_stream_frame(struct Curl_cfilter *cf,
break;
case NGHTTP2_HEADERS:
if(stream->bodystarted) {
- /* Only valid HEADERS after body started is trailer HEADERS. We
+ /* Only valid HEADERS after body started is trailer HEADERS. We
buffer them in on_header callback. */
break;
}
@@ -1252,12 +1242,12 @@ static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
break;
}
case NGHTTP2_GOAWAY:
- ctx->goaway = TRUE;
+ ctx->rcvd_goaway = TRUE;
ctx->goaway_error = frame->goaway.error_code;
- ctx->last_stream_id = frame->goaway.last_stream_id;
+ ctx->remote_max_sid = frame->goaway.last_stream_id;
if(data) {
- infof(data, "received GOAWAY, error=%d, last_stream=%u",
- ctx->goaway_error, ctx->last_stream_id);
+ infof(data, "received GOAWAY, error=%u, last_stream=%u",
+ ctx->goaway_error, ctx->remote_max_sid);
Curl_multi_connchanged(data->multi);
}
break;
@@ -1654,7 +1644,7 @@ CURLcode Curl_http2_request_upgrade(struct dynbuf *req,
return CURLE_FAILED_INIT;
}
- result = Curl_base64url_encode((const char *)binsettings, binlen,
+ result = Curl_base64url_encode((const char *)binsettings, (size_t)binlen,
&base64, &blen);
if(result) {
Curl_dyn_free(req);
@@ -1710,7 +1700,7 @@ static ssize_t http2_handle_stream_close(struct Curl_cfilter *cf,
if(stream->error == NGHTTP2_REFUSED_STREAM) {
CURL_TRC_CF(data, cf, "[%d] REFUSED_STREAM, try again on a new "
"connection", stream->id);
- connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */
+ connclose(cf->conn, "REFUSED_STREAM"); /* do not use this anymore */
data->state.refused_stream = TRUE;
*err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */
return -1;
@@ -1817,7 +1807,7 @@ static void h2_pri_spec(struct cf_h2_ctx *ctx,
}
/*
- * Check if there's been an update in the priority /
+ * Check if there has been an update in the priority /
* dependency settings and if so it submits a PRIORITY frame with the updated
* info.
* Flush any out data pending in the network buffer.
@@ -1878,7 +1868,7 @@ static ssize_t stream_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
}
else if(stream->reset ||
(ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) ||
- (ctx->goaway && ctx->last_stream_id < stream->id)) {
+ (ctx->rcvd_goaway && ctx->remote_max_sid < stream->id)) {
CURL_TRC_CF(data, cf, "[%d] returning ERR", stream->id);
*err = data->req.bytecount? CURLE_PARTIAL_FILE : CURLE_HTTP2;
nread = -1;
@@ -2015,7 +2005,7 @@ static ssize_t cf_h2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
out:
result = h2_progress_egress(cf, data);
if(result == CURLE_AGAIN) {
- /* pending data to send, need to be called again. Ideally, we'd
+ /* pending data to send, need to be called again. Ideally, we would
* monitor the socket for POLLOUT, but we might not be in SENDING
* transfer state any longer and are unable to make this happen.
*/
@@ -2292,8 +2282,8 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
/* Unable to send all data, due to connection blocked or H2 window
* exhaustion. Data is left in our stream buffer, or nghttp2's internal
* frame buffer or our network out buffer. */
- size_t rwin = nghttp2_session_get_stream_remote_window_size(ctx->h2,
- stream->id);
+ size_t rwin = (size_t)nghttp2_session_get_stream_remote_window_size(
+ ctx->h2, stream->id);
/* At the start of a stream, we are called with request headers
* and, possibly, parts of the body. Later, only body data.
* If we cannot send pure body data, we EAGAIN. If there had been
@@ -2358,6 +2348,7 @@ static void cf_h2_adjust_pollset(struct Curl_cfilter *cf,
struct easy_pollset *ps)
{
struct cf_h2_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
curl_socket_t sock;
bool want_recv, want_send;
@@ -2368,7 +2359,6 @@ static void cf_h2_adjust_pollset(struct Curl_cfilter *cf,
Curl_pollset_check(data, ps, sock, &want_recv, &want_send);
if(want_recv || want_send) {
struct h2_stream_ctx *stream = H2_STREAM_CTX(ctx, data);
- struct cf_call_data save;
bool c_exhaust, s_exhaust;
CF_DATA_SAVE(save, cf, data);
@@ -2383,6 +2373,14 @@ static void cf_h2_adjust_pollset(struct Curl_cfilter *cf,
Curl_pollset_set(data, ps, sock, want_recv, want_send);
CF_DATA_RESTORE(cf, save);
}
+ else if(ctx->sent_goaway && !cf->shutdown) {
+ /* shutdown in progress */
+ CF_DATA_SAVE(save, cf, data);
+ want_send = nghttp2_session_want_write(ctx->h2);
+ want_recv = nghttp2_session_want_read(ctx->h2);
+ Curl_pollset_set(data, ps, sock, want_recv, want_send);
+ CF_DATA_RESTORE(cf, save);
+ }
}
static CURLcode cf_h2_connect(struct Curl_cfilter *cf,
@@ -2446,6 +2444,7 @@ static void cf_h2_close(struct Curl_cfilter *cf, struct Curl_easy *data)
CF_DATA_SAVE(save, cf, data);
cf_h2_ctx_clear(ctx);
CF_DATA_RESTORE(cf, save);
+ cf->connected = FALSE;
}
if(cf->next)
cf->next->cft->do_close(cf->next, data);
@@ -2462,6 +2461,50 @@ static void cf_h2_destroy(struct Curl_cfilter *cf, struct Curl_easy *data)
}
}
+static CURLcode cf_h2_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_h2_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
+ CURLcode result;
+ int rv;
+
+ if(!cf->connected || !ctx->h2 || cf->shutdown || ctx->conn_closed) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ CF_DATA_SAVE(save, cf, data);
+
+ if(!ctx->sent_goaway) {
+ rv = nghttp2_submit_goaway(ctx->h2, NGHTTP2_FLAG_NONE,
+ ctx->local_max_sid, 0,
+                               (const uint8_t *)"shutdown", sizeof("shutdown"));
+ if(rv) {
+ failf(data, "nghttp2_submit_goaway() failed: %s(%d)",
+ nghttp2_strerror(rv), rv);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ ctx->sent_goaway = TRUE;
+ }
+ /* GOAWAY submitted, process egress and ingress until nghttp2 is done. */
+ result = CURLE_OK;
+ if(nghttp2_session_want_write(ctx->h2))
+ result = h2_progress_egress(cf, data);
+ if(!result && nghttp2_session_want_read(ctx->h2))
+ result = h2_progress_ingress(cf, data, 0);
+
+ *done = (ctx->conn_closed ||
+ (!result && !nghttp2_session_want_write(ctx->h2) &&
+ !nghttp2_session_want_read(ctx->h2)));
+
+out:
+ CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
+ return result;
+}
+
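cf_h2_shutdown() is meant to be driven repeatedly: it submits one GOAWAY carrying local_max_sid, then reports *done only once nghttp2 has nothing left to read or write (or the connection closed). A hypothetical caller loop, only to show that contract; the readiness wait is elided:

    bool done = FALSE;
    CURLcode r = CURLE_OK;
    while(!done && !r) {
      r = cf_h2_shutdown(cf, data, &done);
      if(!done && !r) {
        /* wait for socket readiness here (elided), honoring a deadline */
      }
    }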
static CURLcode http2_data_pause(struct Curl_cfilter *cf,
struct Curl_easy *data,
bool pause)
@@ -2474,10 +2517,10 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf,
if(ctx && ctx->h2 && stream) {
uint32_t window = pause? 0 : stream->local_window_size;
- int rv = nghttp2_session_set_local_window_size(ctx->h2,
- NGHTTP2_FLAG_NONE,
- stream->id,
- window);
+ int rv = (int)nghttp2_session_set_local_window_size(ctx->h2,
+ NGHTTP2_FLAG_NONE,
+ stream->id,
+ (int32_t)window);
if(rv) {
failf(data, "nghttp2_session_set_local_window_size() failed: %s(%d)",
nghttp2_strerror(rv), rv);
@@ -2505,7 +2548,7 @@ static CURLcode http2_data_pause(struct Curl_cfilter *cf,
#ifdef DEBUGBUILD
{
/* read out the stream local window again */
- uint32_t window2 =
+ uint32_t window2 = (uint32_t)
nghttp2_session_get_stream_local_window_size(ctx->h2,
stream->id);
DEBUGF(infof(data, "HTTP/2 window size is now %u for stream %u",
@@ -2632,6 +2675,7 @@ struct Curl_cftype Curl_cft_nghttp2 = {
cf_h2_destroy,
cf_h2_connect,
cf_h2_close,
+ cf_h2_shutdown,
Curl_cf_def_get_host,
cf_h2_adjust_pollset,
cf_h2_data_pending,
@@ -2729,7 +2773,7 @@ bool Curl_http2_may_switch(struct Curl_easy *data,
data->state.httpwant == CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
#ifndef CURL_DISABLE_PROXY
if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- /* We don't support HTTP/2 proxies yet. Also it's debatable
+ /* We do not support HTTP/2 proxies yet. Also it is debatable
whether or not this setting should apply to HTTP/2 proxies. */
infof(data, "Ignoring HTTP/2 prior knowledge due to proxy");
return FALSE;
@@ -2753,7 +2797,7 @@ CURLcode Curl_http2_switch(struct Curl_easy *data,
if(result)
return result;
- conn->httpversion = 20; /* we know we're on HTTP/2 now */
+ conn->httpversion = 20; /* we know we are on HTTP/2 now */
conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);
@@ -2777,7 +2821,7 @@ CURLcode Curl_http2_switch_at(struct Curl_cfilter *cf, struct Curl_easy *data)
return result;
cf_h2 = cf->next;
- cf->conn->httpversion = 20; /* we know we're on HTTP/2 now */
+ cf->conn->httpversion = 20; /* we know we are on HTTP/2 now */
cf->conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
cf->conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);
@@ -2830,7 +2874,7 @@ CURLcode Curl_http2_upgrade(struct Curl_easy *data,
" after upgrade: len=%zu", nread);
}
- conn->httpversion = 20; /* we know we're on HTTP/2 now */
+ conn->httpversion = 20; /* we know we are on HTTP/2 now */
conn->bits.multiplex = TRUE; /* at least potentially multiplexed */
conn->bundle->multiuse = BUNDLE_MULTIPLEX;
Curl_multi_connchanged(data->multi);
diff --git a/libs/libcurl/src/http_aws_sigv4.c b/libs/libcurl/src/http_aws_sigv4.c
index 0faa386652..3381474cb2 100644
--- a/libs/libcurl/src/http_aws_sigv4.c
+++ b/libs/libcurl/src/http_aws_sigv4.c
@@ -423,6 +423,76 @@ static int compare_func(const void *a, const void *b)
#define MAX_QUERYPAIRS 64
+/**
+ * found_equals has a double meaning: it reports whether an equals sign
+ * has been found when called from canon_query, and, when passed as NULL,
+ * it marks that this function is called to compute the path.
+ */
+static CURLcode canon_string(const char *q, size_t len,
+ struct dynbuf *dq, bool *found_equals)
+{
+ CURLcode result = CURLE_OK;
+
+ for(; len && !result; q++, len--) {
+ if(ISALNUM(*q))
+ result = Curl_dyn_addn(dq, q, 1);
+ else {
+ switch(*q) {
+ case '-':
+ case '.':
+ case '_':
+ case '~':
+ /* allowed as-is */
+ result = Curl_dyn_addn(dq, q, 1);
+ break;
+ case '%':
+ /* uppercase the following if hexadecimal */
+ if(ISXDIGIT(q[1]) && ISXDIGIT(q[2])) {
+ char tmp[3]="%";
+ tmp[1] = Curl_raw_toupper(q[1]);
+ tmp[2] = Curl_raw_toupper(q[2]);
+ result = Curl_dyn_addn(dq, tmp, 3);
+ q += 2;
+ len -= 2;
+ }
+ else
+ /* '%' without a following two-digit hex, encode it */
+ result = Curl_dyn_addn(dq, "%25", 3);
+ break;
+ default: {
+ const char hex[] = "0123456789ABCDEF";
+ char out[3]={'%'};
+
+ if(!found_equals) {
+          /* found_equals is NULL, so we are canonicalizing the path */
+          if(*q == '/') {
+            /* allowed as-is */
+ result = Curl_dyn_addn(dq, q, 1);
+ break;
+ }
+ }
+ else {
+ /* allowed as-is */
+ if(*q == '=') {
+ result = Curl_dyn_addn(dq, q, 1);
+ *found_equals = true;
+ break;
+ }
+ }
+ /* URL encode */
+ out[1] = hex[((unsigned char)*q)>>4];
+ out[2] = hex[*q & 0xf];
+ result = Curl_dyn_addn(dq, out, 3);
+ break;
+ }
+ }
+ }
+ }
+ return result;
+}
+
+
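canon_string() percent-encodes everything outside the unreserved set, keeps '/' when canonicalizing the path (found_equals passed as NULL) and keeps '=' for query pairs. Its behavior, read off the code above, plus a usage sketch in this file's own terms:

    /* Illustration of the expected output:
     *   path  ("/a b/~x", NULL)    -> "/a%20b/~x"   '/' kept for the path
     *   query ("key=a b", &found)  -> "key=a%20b"   '=' kept, found == true
     *   any   ("50%", ...)         -> "50%25"       bare '%' gets encoded
     *   any   ("%2f", ...)         -> "%2F"         valid %xx is uppercased
     */
    struct dynbuf dq;
    bool found = false;
    Curl_dyn_init(&dq, CURL_MAX_HTTP_HEADER);
    if(!canon_string("key=a b", 7, &dq, &found)) {
      /* Curl_dyn_ptr(&dq) is "key=a%20b" and found is true */
    }
    Curl_dyn_free(&dq);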
static CURLcode canon_query(struct Curl_easy *data,
const char *query, struct dynbuf *dq)
{
@@ -460,54 +530,11 @@ static CURLcode canon_query(struct Curl_easy *data,
ap = &array[0];
for(i = 0; !result && (i < entry); i++, ap++) {
- size_t len;
const char *q = ap->p;
bool found_equals = false;
if(!ap->len)
continue;
- for(len = ap->len; len && !result; q++, len--) {
- if(ISALNUM(*q))
- result = Curl_dyn_addn(dq, q, 1);
- else {
- switch(*q) {
- case '-':
- case '.':
- case '_':
- case '~':
- /* allowed as-is */
- result = Curl_dyn_addn(dq, q, 1);
- break;
- case '=':
- /* allowed as-is */
- result = Curl_dyn_addn(dq, q, 1);
- found_equals = true;
- break;
- case '%':
- /* uppercase the following if hexadecimal */
- if(ISXDIGIT(q[1]) && ISXDIGIT(q[2])) {
- char tmp[3]="%";
- tmp[1] = Curl_raw_toupper(q[1]);
- tmp[2] = Curl_raw_toupper(q[2]);
- result = Curl_dyn_addn(dq, tmp, 3);
- q += 2;
- len -= 2;
- }
- else
- /* '%' without a following two-digit hex, encode it */
- result = Curl_dyn_addn(dq, "%25", 3);
- break;
- default: {
- /* URL encode */
- const char hex[] = "0123456789ABCDEF";
- char out[3]={'%'};
- out[1] = hex[((unsigned char)*q)>>4];
- out[2] = hex[*q & 0xf];
- result = Curl_dyn_addn(dq, out, 3);
- break;
- }
- }
- }
- }
+ result = canon_string(q, ap->len, dq, &found_equals);
if(!result && !found_equals) {
/* queries without value still need an equals */
result = Curl_dyn_addn(dq, "=", 1);
@@ -540,6 +567,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
struct dynbuf canonical_headers;
struct dynbuf signed_headers;
struct dynbuf canonical_query;
+ struct dynbuf canonical_path;
char *date_header = NULL;
Curl_HttpReq httpreq;
const char *method = NULL;
@@ -570,6 +598,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
Curl_dyn_init(&canonical_headers, CURL_MAX_HTTP_HEADER);
Curl_dyn_init(&canonical_query, CURL_MAX_HTTP_HEADER);
Curl_dyn_init(&signed_headers, CURL_MAX_HTTP_HEADER);
+ Curl_dyn_init(&canonical_path, CURL_MAX_HTTP_HEADER);
/*
* Parameters parsing
@@ -591,7 +620,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
":%" MAX_SIGV4_LEN_TXT "s",
provider0, provider1, region, service);
if(!provider0[0]) {
- failf(data, "first aws-sigv4 provider can't be empty");
+ failf(data, "first aws-sigv4 provider cannot be empty");
result = CURLE_BAD_FUNCTION_ARGUMENT;
goto fail;
}
@@ -665,10 +694,10 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
if(force_timestamp)
clock = 0;
else
- time(&clock);
+ clock = time(NULL);
}
#else
- time(&clock);
+ clock = time(NULL);
#endif
result = Curl_gmtime(clock, &tm);
if(result) {
@@ -698,6 +727,11 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
result = canon_query(data, data->state.up.query, &canonical_query);
if(result)
goto fail;
+
+ result = canon_string(data->state.up.path, strlen(data->state.up.path),
+ &canonical_path, NULL);
+ if(result)
+ goto fail;
result = CURLE_OUT_OF_MEMORY;
canonical_request =
@@ -708,7 +742,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
"%s\n" /* SignedHeaders */
"%.*s", /* HashedRequestPayload in hex */
method,
- data->state.up.path,
+ Curl_dyn_ptr(&canonical_path),
Curl_dyn_ptr(&canonical_query) ?
Curl_dyn_ptr(&canonical_query) : "",
Curl_dyn_ptr(&canonical_headers),
@@ -776,7 +810,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
"SignedHeaders=%s, "
"Signature=%s\r\n"
/*
- * date_header is added here, only if it wasn't
+ * date_header is added here, only if it was not
* user-specified (using CURLOPT_HTTPHEADER).
* date_header includes \r\n
*/
@@ -800,6 +834,7 @@ CURLcode Curl_output_aws_sigv4(struct Curl_easy *data, bool proxy)
fail:
Curl_dyn_free(&canonical_query);
+ Curl_dyn_free(&canonical_path);
Curl_dyn_free(&canonical_headers);
Curl_dyn_free(&signed_headers);
free(canonical_request);
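The refactor above moves the SigV4 percent-encoding loop into canon_string(), which now serves two callers: canon_query() passes a non-NULL found_equals so '=' is kept and flagged, while the new path canonicalization passes NULL so '/' is kept. Below is a rough standalone sketch of that encoding rule using a plain output buffer instead of libcurl's dynbuf; the helper names are illustrative only and not part of the patch.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int append(char *out, size_t outsz, size_t *pos, const char *s, size_t n)
{
  if(*pos + n >= outsz)
    return -1;               /* would overflow the output buffer */
  memcpy(out + *pos, s, n);
  *pos += n;
  out[*pos] = '\0';
  return 0;
}

/* Canonicalize 'len' bytes of 'q' into 'out'. If found_equals is NULL we are
   encoding a path ('/' is kept); otherwise a query pair ('=' is kept and
   reported through *found_equals). */
static int canon_sketch(const char *q, size_t len,
                        char *out, size_t outsz, int *found_equals)
{
  static const char hex[] = "0123456789ABCDEF";
  size_t pos = 0;
  for(; len; q++, len--) {
    unsigned char c = (unsigned char)*q;
    if(isalnum(c) || c == '-' || c == '.' || c == '_' || c == '~' ||
       (!found_equals && c == '/')) {
      if(append(out, outsz, &pos, q, 1))
        return -1;
    }
    else if(found_equals && c == '=') {
      *found_equals = 1;
      if(append(out, outsz, &pos, q, 1))
        return -1;
    }
    else if(c == '%' && len >= 3 && isxdigit((unsigned char)q[1]) &&
            isxdigit((unsigned char)q[2])) {
      /* already percent-encoded: uppercase the two hex digits */
      char esc[3] = { '%', (char)toupper((unsigned char)q[1]),
                      (char)toupper((unsigned char)q[2]) };
      if(append(out, outsz, &pos, esc, 3))
        return -1;
      q += 2;
      len -= 2;
    }
    else {
      /* everything else, including a lone '%', gets percent-encoded */
      char esc[3] = { '%', hex[c >> 4], hex[c & 0xf] };
      if(append(out, outsz, &pos, esc, 3))
        return -1;
    }
  }
  return 0;
}

int main(void)
{
  char buf[256];
  int eq = 0;
  canon_sketch("/a path/x", strlen("/a path/x"), buf, sizeof(buf), NULL);
  printf("path : %s\n", buf);               /* /a%20path/x */
  canon_sketch("key=a value", strlen("key=a value"), buf, sizeof(buf), &eq);
  printf("query: %s (eq=%d)\n", buf, eq);   /* key=a%20value (eq=1) */
  return 0;
}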
diff --git a/libs/libcurl/src/http_chunks.c b/libs/libcurl/src/http_chunks.c
index f9c7ad3bab..77e3ee3104 100644
--- a/libs/libcurl/src/http_chunks.c
+++ b/libs/libcurl/src/http_chunks.c
@@ -182,7 +182,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
case CHUNK_LF:
/* waiting for the LF after a chunk size */
if(*buf == 0x0a) {
- /* we're now expecting data to come, unless size was zero! */
+ /* we are now expecting data to come, unless size was zero! */
if(0 == ch->datasize) {
ch->state = CHUNK_TRAILER; /* now check for trailers */
}
@@ -289,9 +289,9 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
break;
}
else {
- /* no trailer, we're on the final CRLF pair */
+ /* no trailer, we are on the final CRLF pair */
ch->state = CHUNK_TRAILER_POSTCR;
- break; /* don't advance the pointer */
+ break; /* do not advance the pointer */
}
}
else {
@@ -344,7 +344,7 @@ static CURLcode httpchunk_readwrite(struct Curl_easy *data,
blen--;
(*pconsumed)++;
/* Record the length of any data left in the end of the buffer
- even if there's no more chunks to read */
+         even if there are no more chunks to read */
ch->datasize = blen;
ch->state = CHUNK_DONE;
CURL_TRC_WRITE(data, "http_chunk, response complete");
@@ -470,7 +470,7 @@ const struct Curl_cwtype Curl_httpchunk_unencoder = {
sizeof(struct chunked_writer)
};
-/* max length of a HTTP chunk that we want to generate */
+/* max length of an HTTP chunk that we want to generate */
#define CURL_CHUNKED_MINLEN (1024)
#define CURL_CHUNKED_MAXLEN (64 * 1024)
@@ -659,6 +659,7 @@ const struct Curl_crtype Curl_httpchunk_encoder = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct chunked_reader)
};
diff --git a/libs/libcurl/src/http_chunks.h b/libs/libcurl/src/http_chunks.h
index f40f57e723..e124882203 100644
--- a/libs/libcurl/src/http_chunks.h
+++ b/libs/libcurl/src/http_chunks.h
@@ -33,12 +33,12 @@ struct connectdata;
/*
* The longest possible hexadecimal number we support in a chunked transfer.
* Neither RFC2616 nor the later HTTP specs define a maximum chunk size.
- * For 64 bit curl_off_t we support 16 digits. For 32 bit, 8 digits.
+ * For 64-bit curl_off_t we support 16 digits. For 32-bit, 8 digits.
*/
#define CHUNK_MAXNUM_LEN (SIZEOF_CURL_OFF_T * 2)
typedef enum {
- /* await and buffer all hexadecimal digits until we get one that isn't a
+ /* await and buffer all hexadecimal digits until we get one that is not a
hexadecimal digit. When done, we go CHUNK_LF */
CHUNK_HEX,
@@ -54,9 +54,9 @@ typedef enum {
big deal. */
CHUNK_POSTLF,
- /* Used to mark that we're out of the game. NOTE: that there's a 'datasize'
- field in the struct that will tell how many bytes that were not passed to
- the client in the end of the last buffer! */
+  /* Used to mark that we are out of the game. NOTE: there is a 'datasize'
+     field in the struct that tells how many bytes were not passed to the
+     client at the end of the last buffer! */
CHUNK_STOP,
/* At this point optional trailer headers can be found, unless the next line
diff --git a/libs/libcurl/src/http_negotiate.c b/libs/libcurl/src/http_negotiate.c
index 545e703f71..4613bf8eda 100644
--- a/libs/libcurl/src/http_negotiate.c
+++ b/libs/libcurl/src/http_negotiate.c
@@ -95,7 +95,7 @@ CURLcode Curl_input_negotiate(struct Curl_easy *data, struct connectdata *conn,
Curl_http_auth_cleanup_negotiate(conn);
}
else if(state != GSS_AUTHNONE) {
- /* The server rejected our authentication and hasn't supplied any more
+ /* The server rejected our authentication and has not supplied any more
negotiation mechanisms */
Curl_http_auth_cleanup_negotiate(conn);
return CURLE_LOGIN_DENIED;
@@ -218,7 +218,7 @@ CURLcode Curl_output_negotiate(struct Curl_easy *data,
if(*state == GSS_AUTHDONE || *state == GSS_AUTHSUCC) {
/* connection is already authenticated,
- * don't send a header in future requests */
+ * do not send a header in future requests */
authp->done = TRUE;
}
diff --git a/libs/libcurl/src/http_ntlm.c b/libs/libcurl/src/http_ntlm.c
index 83cd435fdc..a93545be32 100644
--- a/libs/libcurl/src/http_ntlm.c
+++ b/libs/libcurl/src/http_ntlm.c
@@ -200,7 +200,7 @@ CURLcode Curl_output_ntlm(struct Curl_easy *data, bool proxy)
Curl_bufref_init(&ntlmmsg);
- /* connection is already authenticated, don't send a header in future
+ /* connection is already authenticated, do not send a header in future
* requests so go directly to NTLMSTATE_LAST */
if(*state == NTLMSTATE_TYPE3)
*state = NTLMSTATE_LAST;
diff --git a/libs/libcurl/src/http_proxy.c b/libs/libcurl/src/http_proxy.c
index be595ab3f7..73f94575c6 100644
--- a/libs/libcurl/src/http_proxy.c
+++ b/libs/libcurl/src/http_proxy.c
@@ -298,6 +298,7 @@ struct Curl_cftype Curl_cft_http_proxy = {
http_proxy_cf_destroy,
http_proxy_cf_connect,
http_proxy_cf_close,
+ Curl_cf_def_shutdown,
Curl_cf_http_proxy_get_host,
Curl_cf_def_adjust_pollset,
Curl_cf_def_data_pending,
diff --git a/libs/libcurl/src/idn.c b/libs/libcurl/src/idn.c
index bc1a6a7b38..7607e34d82 100644
--- a/libs/libcurl/src/idn.c
+++ b/libs/libcurl/src/idn.c
@@ -54,56 +54,56 @@
#if defined(USE_APPLE_IDN)
#include <unicode/uidna.h>
+#define MAX_HOST_LENGTH 512
+
static CURLcode mac_idn_to_ascii(const char *in, char **out)
{
- UErrorCode err = U_ZERO_ERROR;
- UIDNA* idna = uidna_openUTS46(UIDNA_CHECK_BIDI, &err);
- if(U_FAILURE(err)) {
- return CURLE_OUT_OF_MEMORY;
- }
- else {
- UIDNAInfo info = UIDNA_INFO_INITIALIZER;
- char buffer[256] = {0};
- (void)uidna_nameToASCII_UTF8(idna, in, -1, buffer,
- sizeof(buffer), &info, &err);
- uidna_close(idna);
- if(U_FAILURE(err)) {
- return CURLE_URL_MALFORMAT;
- }
- else {
- *out = strdup(buffer);
- if(*out)
- return CURLE_OK;
- else
- return CURLE_OUT_OF_MEMORY;
+ size_t inlen = strlen(in);
+ if(inlen < MAX_HOST_LENGTH) {
+ UErrorCode err = U_ZERO_ERROR;
+ UIDNA* idna = uidna_openUTS46(
+ UIDNA_CHECK_BIDI|UIDNA_NONTRANSITIONAL_TO_ASCII, &err);
+ if(!U_FAILURE(err)) {
+ UIDNAInfo info = UIDNA_INFO_INITIALIZER;
+ char buffer[MAX_HOST_LENGTH] = {0};
+ (void)uidna_nameToASCII_UTF8(idna, in, -1, buffer,
+ sizeof(buffer) - 1, &info, &err);
+ uidna_close(idna);
+ if(!U_FAILURE(err)) {
+ *out = strdup(buffer);
+ if(*out)
+ return CURLE_OK;
+ else
+ return CURLE_OUT_OF_MEMORY;
+ }
}
}
+ return CURLE_URL_MALFORMAT;
}
static CURLcode mac_ascii_to_idn(const char *in, char **out)
{
- UErrorCode err = U_ZERO_ERROR;
- UIDNA* idna = uidna_openUTS46(UIDNA_CHECK_BIDI, &err);
- if(U_FAILURE(err)) {
- return CURLE_OUT_OF_MEMORY;
- }
- else {
- UIDNAInfo info = UIDNA_INFO_INITIALIZER;
- char buffer[256] = {0};
- (void)uidna_nameToUnicodeUTF8(idna, in, -1, buffer,
- sizeof(buffer), &info, &err);
- uidna_close(idna);
- if(U_FAILURE(err)) {
- return CURLE_URL_MALFORMAT;
- }
- else {
- *out = strdup(buffer);
- if(*out)
- return CURLE_OK;
- else
- return CURLE_OUT_OF_MEMORY;
+ size_t inlen = strlen(in);
+ if(inlen < MAX_HOST_LENGTH) {
+ UErrorCode err = U_ZERO_ERROR;
+ UIDNA* idna = uidna_openUTS46(
+ UIDNA_CHECK_BIDI|UIDNA_NONTRANSITIONAL_TO_UNICODE, &err);
+ if(!U_FAILURE(err)) {
+ UIDNAInfo info = UIDNA_INFO_INITIALIZER;
+ char buffer[MAX_HOST_LENGTH] = {0};
+ (void)uidna_nameToUnicodeUTF8(idna, in, -1, buffer,
+ sizeof(buffer) - 1, &info, &err);
+ uidna_close(idna);
+ if(!U_FAILURE(err)) {
+ *out = strdup(buffer);
+ if(*out)
+ return CURLE_OK;
+ else
+ return CURLE_OUT_OF_MEMORY;
+ }
}
}
+ return CURLE_URL_MALFORMAT;
}
#endif
@@ -207,7 +207,7 @@ bool Curl_is_ASCII_name(const char *hostname)
* Curl_idn_decode() returns an allocated IDN decoded string if it was
* possible. NULL on error.
*
- * CURLE_URL_MALFORMAT - the host name could not be converted
+ * CURLE_URL_MALFORMAT - the hostname could not be converted
* CURLE_OUT_OF_MEMORY - memory problem
*
*/
@@ -319,7 +319,7 @@ void Curl_free_idnconverted_hostname(struct hostname *host)
*/
CURLcode Curl_idnconvert_hostname(struct hostname *host)
{
- /* set the name we use to display the host name */
+ /* set the name we use to display the hostname */
host->dispname = host->name;
#ifdef USE_IDN
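The rewritten mac_idn_to_ascii()/mac_ascii_to_idn() above reject hostnames of MAX_HOST_LENGTH bytes or more, reserve the trailing NUL (sizeof(buffer) - 1) and enable non-transitional UTS #46 processing. A minimal standalone sketch of the same ICU calls, assuming a build that provides <unicode/uidna.h>; error handling is abbreviated compared to the patch.

#include <stdio.h>
#include <unicode/uidna.h>

int main(void)
{
  const char *host = "bücher.example";
  char ascii[512] = {0};
  UErrorCode err = U_ZERO_ERROR;
  UIDNAInfo info = UIDNA_INFO_INITIALIZER;
  UIDNA *idna = uidna_openUTS46(UIDNA_CHECK_BIDI |
                                UIDNA_NONTRANSITIONAL_TO_ASCII, &err);
  if(U_FAILURE(err))
    return 1;
  /* -1 means "NUL-terminated input"; leave room for the terminator */
  (void)uidna_nameToASCII_UTF8(idna, host, -1, ascii,
                               (int32_t)(sizeof(ascii) - 1), &info, &err);
  uidna_close(idna);
  if(U_FAILURE(err) || info.errors)
    return 1;
  printf("%s -> %s\n", host, ascii);   /* xn--bcher-kva.example */
  return 0;
}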
diff --git a/libs/libcurl/src/imap.c b/libs/libcurl/src/imap.c
index 0f67d5c71c..95c36b7868 100644
--- a/libs/libcurl/src/imap.c
+++ b/libs/libcurl/src/imap.c
@@ -512,7 +512,7 @@ static CURLcode imap_perform_login(struct Curl_easy *data,
char *passwd;
/* Check we have a username and password to authenticate with and end the
- connect phase if we don't */
+ connect phase if we do not */
if(!data->state.aptr.user) {
imap_state(data, IMAP_STOP);
@@ -612,7 +612,7 @@ static CURLcode imap_perform_authentication(struct Curl_easy *data,
saslprogress progress;
/* Check if already authenticated OR if there is enough data to authenticate
- with and end the connect phase if we don't */
+ with and end the connect phase if we do not */
if(imapc->preauth ||
!Curl_sasl_can_authenticate(&imapc->sasl, data)) {
imap_state(data, IMAP_STOP);
@@ -776,7 +776,7 @@ static CURLcode imap_perform_append(struct Curl_easy *data)
/* Prepare the mime data if some. */
if(data->set.mimepost.kind != MIMEKIND_NONE) {
/* Use the whole structure as data. */
- data->set.mimepost.flags &= ~MIME_BODY_ONLY;
+ data->set.mimepost.flags &= ~(unsigned int)MIME_BODY_ONLY;
/* Add external headers and mime version. */
curl_mime_headers(&data->set.mimepost, data->set.headers, 0);
@@ -1187,7 +1187,7 @@ static CURLcode imap_state_fetch_resp(struct Curl_easy *data,
chunk = (size_t)size;
if(!chunk) {
- /* no size, we're done with the data */
+ /* no size, we are done with the data */
imap_state(data, IMAP_STOP);
return CURLE_OK;
}
@@ -1214,18 +1214,18 @@ static CURLcode imap_state_fetch_resp(struct Curl_easy *data,
if(data->req.bytecount == size)
/* The entire data is already transferred! */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
else {
/* IMAP download */
data->req.maxdownload = size;
/* force a recv/send check of this connection, as the data might've been
read off the socket already */
data->state.select_bits = CURL_CSELECT_IN;
- Curl_xfer_setup(data, FIRSTSOCKET, size, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, size, FALSE);
}
}
else {
- /* We don't know how to parse this line */
+ /* We do not know how to parse this line */
failf(data, "Failed to parse FETCH response.");
result = CURLE_WEIRD_SERVER_REPLY;
}
@@ -1269,7 +1269,7 @@ static CURLcode imap_state_append_resp(struct Curl_easy *data, int imapcode,
Curl_pgrsSetUploadSize(data, data->state.infilesize);
/* IMAP upload */
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* End of DO phase */
imap_state(data, IMAP_STOP);
@@ -1694,7 +1694,7 @@ static CURLcode imap_dophase_done(struct Curl_easy *data, bool connected)
if(imap->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
return CURLE_OK;
}
diff --git a/libs/libcurl/src/inet_ntop.c b/libs/libcurl/src/inet_ntop.c
index 776e7d397b..408bdf1537 100644
--- a/libs/libcurl/src/inet_ntop.c
+++ b/libs/libcurl/src/inet_ntop.c
@@ -58,7 +58,7 @@
* - uses no statics
* - takes a unsigned char* not an in_addr as input
*/
-static char *inet_ntop4 (const unsigned char *src, char *dst, size_t size)
+static char *inet_ntop4(const unsigned char *src, char *dst, size_t size)
{
char tmp[sizeof("255.255.255.255")];
size_t len;
@@ -84,14 +84,14 @@ static char *inet_ntop4 (const unsigned char *src, char *dst, size_t size)
/*
* Convert IPv6 binary address into presentation (printable) format.
*/
-static char *inet_ntop6 (const unsigned char *src, char *dst, size_t size)
+static char *inet_ntop6(const unsigned char *src, char *dst, size_t size)
{
/*
* Note that int32_t and int16_t need only be "at least" large enough
- * to contain a value of the specified size. On some systems, like
+ * to contain a value of the specified size. On some systems, like
* Crays, there is no such thing as an integer variable with 16 bits.
* Keep this in mind if you think this function should have been coded
- * to use pointer overlays. All the world's not a VAX.
+ * to use pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")];
char *tp;
@@ -168,7 +168,7 @@ static char *inet_ntop6 (const unsigned char *src, char *dst, size_t size)
*tp++ = ':';
*tp++ = '\0';
- /* Check for overflow, copy, and we're done.
+ /* Check for overflow, copy, and we are done.
*/
if((size_t)(tp - tmp) > size) {
errno = ENOSPC;
@@ -185,10 +185,9 @@ static char *inet_ntop6 (const unsigned char *src, char *dst, size_t size)
* Returns NULL on error and errno set with the specific
* error, EAFNOSUPPORT or ENOSPC.
*
- * On Windows we store the error in the thread errno, not
- * in the winsock error code. This is to avoid losing the
- * actual last winsock error. So when this function returns
- * NULL, check errno not SOCKERRNO.
+ * On Windows we store the error in the thread errno, not in the winsock error
+ * code. This is to avoid losing the actual last winsock error. When this
+ * function returns NULL, check errno not SOCKERRNO.
*/
char *Curl_inet_ntop(int af, const void *src, char *buf, size_t size)
{
diff --git a/libs/libcurl/src/inet_ntop.h b/libs/libcurl/src/inet_ntop.h
index 71f4270900..e2c3b4b5ac 100644
--- a/libs/libcurl/src/inet_ntop.h
+++ b/libs/libcurl/src/inet_ntop.h
@@ -32,8 +32,13 @@ char *Curl_inet_ntop(int af, const void *addr, char *buf, size_t size);
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
+#ifdef _WIN32
#define Curl_inet_ntop(af,addr,buf,size) \
- inet_ntop(af, addr, buf, (curl_socklen_t)size)
+ inet_ntop(af, addr, buf, size)
+#else
+#define Curl_inet_ntop(af,addr,buf,size) \
+ inet_ntop(af, addr, buf, (curl_socklen_t)(size))
+#endif
#endif
#endif /* HEADER_CURL_INET_NTOP_H */
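The macro above is split because Windows declares inet_ntop() with a size_t length argument while POSIX uses socklen_t, so the explicit cast is only applied on the non-Windows branch. For reference, a plain POSIX-side use of the wrapped function looks roughly like this (not libcurl code):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
  struct in_addr v4;
  char buf[INET_ADDRSTRLEN];
  v4.s_addr = htonl(0x7f000001);        /* 127.0.0.1 in host order */
  if(!inet_ntop(AF_INET, &v4, buf, (socklen_t)sizeof(buf)))
    return 1;
  printf("%s\n", buf);                  /* prints 127.0.0.1 */
  return 0;
}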
diff --git a/libs/libcurl/src/inet_pton.c b/libs/libcurl/src/inet_pton.c
index 923b1cccd7..6f39bc170f 100644
--- a/libs/libcurl/src/inet_pton.c
+++ b/libs/libcurl/src/inet_pton.c
@@ -48,8 +48,8 @@
#endif
/*
- * WARNING: Don't even consider trying to compile this on a system where
- * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ * WARNING: Do not even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
*/
static int inet_pton4(const char *src, unsigned char *dst);
@@ -61,12 +61,12 @@ static int inet_pton6(const char *src, unsigned char *dst);
* to network format (which is usually some kind of binary format).
* return:
* 1 if the address was valid for the specified address family
- * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * 0 if the address was not valid (`dst' is untouched in this case)
* -1 if some other error occurred (`dst' is untouched in this case, too)
* notice:
* On Windows we store the error in the thread errno, not
* in the winsock error code. This is to avoid losing the
- * actual last winsock error. So when this function returns
+ * actual last winsock error. When this function returns
* -1, check errno not SOCKERRNO.
* author:
* Paul Vixie, 1996.
@@ -92,7 +92,7 @@ Curl_inet_pton(int af, const char *src, void *dst)
* return:
* 1 if `src' is a valid dotted quad, else 0.
* notice:
- * does not touch `dst' unless it's returning 1.
+ * does not touch `dst' unless it is returning 1.
* author:
* Paul Vixie, 1996.
*/
@@ -147,7 +147,7 @@ inet_pton4(const char *src, unsigned char *dst)
* return:
* 1 if `src' is a valid [RFC1884 2.2] address, else 0.
* notice:
- * (1) does not touch `dst' unless it's returning 1.
+ * (1) does not touch `dst' unless it is returning 1.
* (2) :: in a full address is silently ignored.
* credit:
* inspired by Mark Andrews.
@@ -221,7 +221,7 @@ inet_pton6(const char *src, unsigned char *dst)
if(colonp) {
/*
* Since some memmove()'s erroneously fail to handle
- * overlapping regions, we'll do the shift by hand.
+ * overlapping regions, we will do the shift by hand.
*/
const ssize_t n = tp - colonp;
ssize_t i;
diff --git a/libs/libcurl/src/krb5.c b/libs/libcurl/src/krb5.c
index a635966d51..f7f6d2570c 100644
--- a/libs/libcurl/src/krb5.c
+++ b/libs/libcurl/src/krb5.c
@@ -25,7 +25,7 @@
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -169,7 +169,7 @@ krb5_encode(void *app_data, const void *from, int length, int level, void **to)
* libraries modify the input buffer in gss_wrap()
*/
dec.value = (void *)from;
- dec.length = length;
+ dec.length = (size_t)length;
maj = gss_wrap(&min, *context,
level == PROT_PRIVATE,
GSS_C_QOP_DEFAULT,
@@ -178,7 +178,7 @@ krb5_encode(void *app_data, const void *from, int length, int level, void **to)
if(maj != GSS_S_COMPLETE)
return -1;
- /* malloc a new buffer, in case gss_release_buffer doesn't work as
+ /* malloc a new buffer, in case gss_release_buffer does not work as
expected */
*to = malloc(enc.length);
if(!*to)
@@ -227,7 +227,7 @@ krb5_auth(void *app_data, struct Curl_easy *data, struct connectdata *conn)
/* this loop will execute twice (once for service, once for host) */
for(;;) {
- /* this really shouldn't be repeated here, but can't help it */
+ /* this really should not be repeated here, but cannot help it */
if(service == srv_host) {
result = ftpsend(data, conn, "AUTH GSSAPI");
if(result)
@@ -329,7 +329,7 @@ krb5_auth(void *app_data, struct Curl_easy *data, struct connectdata *conn)
size_t len = Curl_dyn_len(&pp->recvbuf);
p = Curl_dyn_ptr(&pp->recvbuf);
if((len < 4) || (p[0] != '2' && p[0] != '3')) {
- infof(data, "Server didn't accept auth data");
+ infof(data, "Server did not accept auth data");
ret = AUTH_ERROR;
break;
}
@@ -524,7 +524,7 @@ static CURLcode read_data(struct Curl_easy *data, int sockindex,
return result;
if(len) {
- len = ntohl(len);
+ len = (int)ntohl((uint32_t)len);
if(len > CURL_MAX_INPUT_LENGTH)
return CURLE_TOO_LARGE;
@@ -536,7 +536,7 @@ static CURLcode read_data(struct Curl_easy *data, int sockindex,
do {
char buffer[1024];
nread = CURLMIN(len, (int)sizeof(buffer));
- result = socket_read(data, sockindex, buffer, nread);
+ result = socket_read(data, sockindex, buffer, (size_t)nread);
if(result)
return result;
result = Curl_dyn_addn(&buf->buf, buffer, nread);
@@ -630,7 +630,7 @@ static void do_sec_send(struct Curl_easy *data, struct connectdata *conn,
else
prot_level = conn->command_prot;
}
- bytes = conn->mech->encode(conn->app_data, from, length, prot_level,
+ bytes = conn->mech->encode(conn->app_data, from, length, (int)prot_level,
(void **)&buffer);
if(!buffer || bytes <= 0)
return; /* error */
@@ -658,7 +658,7 @@ static void do_sec_send(struct Curl_easy *data, struct connectdata *conn,
}
}
else {
- htonl_bytes = htonl(bytes);
+ htonl_bytes = (int)htonl((OM_uint32)bytes);
socket_write(data, fd, &htonl_bytes, sizeof(htonl_bytes));
socket_write(data, fd, buffer, curlx_sitouz(bytes));
}
@@ -724,7 +724,7 @@ int Curl_sec_read_msg(struct Curl_easy *data, struct connectdata *conn,
decoded_len = curlx_uztosi(decoded_sz);
decoded_len = conn->mech->decode(conn->app_data, buf, decoded_len,
- level, conn);
+ (int)level, conn);
if(decoded_len <= 0) {
free(buf);
return -1;
@@ -789,7 +789,7 @@ static int sec_set_protection_level(struct Curl_easy *data)
if(pbsz) {
/* stick to default value if the check fails */
if(ISDIGIT(pbsz[5]))
- buffer_size = atoi(&pbsz[5]);
+ buffer_size = (unsigned int)atoi(&pbsz[5]);
if(buffer_size < conn->buffer_size)
conn->buffer_size = buffer_size;
}
@@ -878,7 +878,7 @@ static CURLcode choose_mech(struct Curl_easy *data, struct connectdata *conn)
if(ret != AUTH_CONTINUE) {
if(ret != AUTH_OK) {
- /* Mechanism has dumped the error to stderr, don't error here. */
+ /* Mechanism has dumped the error to stderr, do not error here. */
return CURLE_USE_SSL_FAILED;
}
DEBUGASSERT(ret == AUTH_OK);
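The casts above concern the 4-byte, network-order length prefix that the GSSAPI-protected FTP channel puts in front of each wrapped block. A minimal sketch of that framing pattern in isolation (illustrative only; the patch itself merely adds explicit integer casts around the htonl()/ntohl() calls):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Frame 'len' payload bytes with a 4-byte network-order length prefix into
   'out' (which must hold len + 4 bytes). Returns the total frame size. */
static size_t frame_put(unsigned char *out, const void *payload, uint32_t len)
{
  uint32_t be = htonl(len);
  memcpy(out, &be, sizeof(be));
  memcpy(out + sizeof(be), payload, len);
  return sizeof(be) + len;
}

int main(void)
{
  unsigned char frame[64];
  size_t n = frame_put(frame, "hello", 5);
  uint32_t be, len;
  memcpy(&be, frame, sizeof(be));
  len = ntohl(be);                      /* reader side: recover the length */
  printf("frame=%zu bytes, payload len=%u\n", n, (unsigned)len);
  return 0;
}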
diff --git a/libs/libcurl/src/ldap.c b/libs/libcurl/src/ldap.c
index af01902594..073f003fd4 100644
--- a/libs/libcurl/src/ldap.c
+++ b/libs/libcurl/src/ldap.c
@@ -252,16 +252,17 @@ static int ldap_win_bind_auth(LDAP *server, const char *user,
}
if(method && user && passwd) {
- rc = Curl_create_sspi_identity(user, passwd, &cred);
+ CURLcode res = Curl_create_sspi_identity(user, passwd, &cred);
+ rc = (int)res;
if(!rc) {
- rc = ldap_bind_s(server, NULL, (TCHAR *)&cred, method);
+ rc = (int)ldap_bind_s(server, NULL, (TCHAR *)&cred, method);
Curl_sspi_free_identity(&cred);
}
}
else {
/* proceed with current user credentials */
method = LDAP_AUTH_NEGOTIATE;
- rc = ldap_bind_s(server, NULL, NULL, method);
+ rc = (int)ldap_bind_s(server, NULL, NULL, method);
}
return rc;
}
@@ -279,14 +280,14 @@ static int ldap_win_bind(struct Curl_easy *data, LDAP *server,
inuser = curlx_convert_UTF8_to_tchar((char *) user);
inpass = curlx_convert_UTF8_to_tchar((char *) passwd);
- rc = ldap_simple_bind_s(server, inuser, inpass);
+ rc = (int)ldap_simple_bind_s(server, inuser, inpass);
curlx_unicodefree(inuser);
curlx_unicodefree(inpass);
}
#if defined(USE_WINDOWS_SSPI)
else {
- rc = ldap_win_bind_auth(server, user, passwd, data->set.httpauth);
+ rc = (int)ldap_win_bind_auth(server, user, passwd, data->set.httpauth);
}
#endif
@@ -296,8 +297,10 @@ static int ldap_win_bind(struct Curl_easy *data, LDAP *server,
#if defined(USE_WIN32_LDAP)
#define FREE_ON_WINLDAP(x) curlx_unicodefree(x)
+#define curl_ldap_num_t ULONG
#else
#define FREE_ON_WINLDAP(x)
+#define curl_ldap_num_t int
#endif
@@ -337,7 +340,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
rc = _ldap_url_parse(data, conn, &ludp);
#endif
if(rc) {
- failf(data, "Bad LDAP URL: %s", ldap_err2string(rc));
+ failf(data, "Bad LDAP URL: %s", ldap_err2string((curl_ldap_num_t)rc));
result = CURLE_URL_MALFORMAT;
goto quit;
}
@@ -372,8 +375,8 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
if(ldap_ssl) {
#ifdef HAVE_LDAP_SSL
#ifdef USE_WIN32_LDAP
- /* Win32 LDAP SDK doesn't support insecure mode without CA! */
- server = ldap_sslinit(host, conn->primary.remote_port, 1);
+ /* Win32 LDAP SDK does not support insecure mode without CA! */
+ server = ldap_sslinit(host, (curl_ldap_num_t)conn->primary.remote_port, 1);
ldap_set_option(server, LDAP_OPT_SSL, LDAP_OPT_ON);
#else
int ldap_option;
@@ -503,7 +506,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
goto quit;
}
else {
- server = ldap_init(host, conn->primary.remote_port);
+ server = ldap_init(host, (curl_ldap_num_t)conn->primary.remote_port);
if(!server) {
failf(data, "LDAP local: Cannot connect to %s:%u",
conn->host.dispname, conn->primary.remote_port);
@@ -529,7 +532,7 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
if(rc) {
#ifdef USE_WIN32_LDAP
failf(data, "LDAP local: bind via ldap_win_bind %s",
- ldap_err2string(rc));
+ ldap_err2string((ULONG)rc));
#else
failf(data, "LDAP local: bind via ldap_simple_bind_s %s",
ldap_err2string(rc));
@@ -539,11 +542,12 @@ static CURLcode ldap_do(struct Curl_easy *data, bool *done)
}
Curl_pgrsSetDownloadCounter(data, 0);
- rc = ldap_search_s(server, ludp->lud_dn, ludp->lud_scope,
- ludp->lud_filter, ludp->lud_attrs, 0, &ldapmsg);
+ rc = (int)ldap_search_s(server, ludp->lud_dn,
+ (curl_ldap_num_t)ludp->lud_scope,
+ ludp->lud_filter, ludp->lud_attrs, 0, &ldapmsg);
if(rc && rc != LDAP_SIZELIMIT_EXCEEDED) {
- failf(data, "LDAP remote: %s", ldap_err2string(rc));
+ failf(data, "LDAP remote: %s", ldap_err2string((curl_ldap_num_t)rc));
result = CURLE_LDAP_SEARCH_FAILED;
goto quit;
}
@@ -754,7 +758,7 @@ quit:
FREE_ON_WINLDAP(host);
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
connclose(conn, "LDAP connection always disable reuse");
return result;
diff --git a/libs/libcurl/src/libcurl.rc b/libs/libcurl/src/libcurl.rc
index a9c9c5384e..1fc7dc58d4 100644
--- a/libs/libcurl/src/libcurl.rc
+++ b/libs/libcurl/src/libcurl.rc
@@ -32,7 +32,7 @@ VS_VERSION_INFO VERSIONINFO
FILEVERSION RC_VERSION
PRODUCTVERSION RC_VERSION
FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
-#if defined(DEBUGBUILD) || defined(_DEBUG)
+#if defined(DEBUGBUILD) || defined(UNITTESTS) || defined(CURLDEBUG) || defined(_DEBUG)
FILEFLAGS VS_FF_DEBUG
#else
FILEFLAGS 0L
diff --git a/libs/libcurl/src/macos.c b/libs/libcurl/src/macos.c
index c6d2c11cce..d8f4fe1d7c 100644
--- a/libs/libcurl/src/macos.c
+++ b/libs/libcurl/src/macos.c
@@ -38,8 +38,8 @@ CURLcode Curl_macos_init(void)
/*
* The automagic conversion from IPv4 literals to IPv6 literals only
* works if the SCDynamicStoreCopyProxies system function gets called
- * first. As Curl currently doesn't support system-wide HTTP proxies, we
- * therefore don't use any value this function might return.
+ * first. As Curl currently does not support system-wide HTTP proxies, we
+ * therefore do not use any value this function might return.
*
* This function is only available on macOS and is not needed for
* IPv4-only builds, hence the conditions for defining
diff --git a/libs/libcurl/src/md4.c b/libs/libcurl/src/md4.c
index 73ad24e33b..cbeaa0ff55 100644
--- a/libs/libcurl/src/md4.c
+++ b/libs/libcurl/src/md4.c
@@ -37,6 +37,9 @@
#if (OPENSSL_VERSION_NUMBER >= 0x30000000L) && !defined(USE_AMISSL)
/* OpenSSL 3.0.0 marks the MD4 functions as deprecated */
#define OPENSSL_NO_MD4
+#else
+/* Cover also OPENSSL_NO_MD4 configured in openssl */
+#include <openssl/opensslconf.h>
#endif
#endif /* USE_OPENSSL */
@@ -217,7 +220,7 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
}
#else
-/* When no other crypto library is available, or the crypto library doesn't
+/* When no other crypto library is available, or the crypto library does not
 * support MD4, we use this implementation of it
*
* This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
@@ -229,8 +232,8 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
* Author:
* Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
*
- * This software was written by Alexander Peslyak in 2001. No copyright is
- * claimed, and the software is hereby placed in the public domain. In case
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain. In case
* this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2001
* Alexander Peslyak and it is hereby released to the general public under the
@@ -239,19 +242,19 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
- * There's ABSOLUTELY NO WARRANTY, express or implied.
+ * There is ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
- * unsigned integer data type will do), there's no compile-time endianness
- * configuration, and the function prototypes match OpenSSL's. No code from
+ * unsigned integer data type will do), there is no compile-time endianness
+ * configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
*
* The primary goals of this implementation are portability and ease of use.
- * It is meant to be fast, but not as fast as possible. Some known
+ * It is meant to be fast, but not as fast as possible. Some known
* optimizations are not included to reduce source code size and avoid
* compile-time configuration.
*/
@@ -277,14 +280,14 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx);
* F and G are optimized compared to their RFC 1320 definitions, with the
* optimization for F borrowed from Colin Plumb's MD5 implementation.
*/
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & ((y) | (z))) | ((y) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define MD4_F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define MD4_G(x, y, z) (((x) & ((y) | (z))) | ((y) & (z)))
+#define MD4_H(x, y, z) ((x) ^ (y) ^ (z))
/*
* The MD4 transformation for all three rounds.
*/
-#define STEP(f, a, b, c, d, x, s) \
+#define MD4_STEP(f, a, b, c, d, x, s) \
(a) += f((b), (c), (d)) + (x); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s))));
@@ -293,30 +296,31 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx);
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
- * memory accesses is just an optimization. Nothing will break if it
- * doesn't work.
+ * memory accesses is just an optimization. Nothing will break if it
+ * does not work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
-#define SET(n) \
+#define MD4_SET(n) \
(*(MD4_u32plus *)(void *)&ptr[(n) * 4])
-#define GET(n) \
- SET(n)
+#define MD4_GET(n) \
+ MD4_SET(n)
#else
-#define SET(n) \
+#define MD4_SET(n) \
(ctx->block[(n)] = \
(MD4_u32plus)ptr[(n) * 4] | \
((MD4_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD4_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD4_u32plus)ptr[(n) * 4 + 3] << 24))
-#define GET(n) \
+#define MD4_GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
- * the bit counters. There are no alignment requirements.
+ * the bit counters. There are no alignment requirements.
*/
-static const void *body(MD4_CTX *ctx, const void *data, unsigned long size)
+static const void *my_md4_body(MD4_CTX *ctx,
+ const void *data, unsigned long size)
{
const unsigned char *ptr;
MD4_u32plus a, b, c, d;
@@ -337,58 +341,58 @@ static const void *body(MD4_CTX *ctx, const void *data, unsigned long size)
saved_d = d;
/* Round 1 */
- STEP(F, a, b, c, d, SET(0), 3)
- STEP(F, d, a, b, c, SET(1), 7)
- STEP(F, c, d, a, b, SET(2), 11)
- STEP(F, b, c, d, a, SET(3), 19)
- STEP(F, a, b, c, d, SET(4), 3)
- STEP(F, d, a, b, c, SET(5), 7)
- STEP(F, c, d, a, b, SET(6), 11)
- STEP(F, b, c, d, a, SET(7), 19)
- STEP(F, a, b, c, d, SET(8), 3)
- STEP(F, d, a, b, c, SET(9), 7)
- STEP(F, c, d, a, b, SET(10), 11)
- STEP(F, b, c, d, a, SET(11), 19)
- STEP(F, a, b, c, d, SET(12), 3)
- STEP(F, d, a, b, c, SET(13), 7)
- STEP(F, c, d, a, b, SET(14), 11)
- STEP(F, b, c, d, a, SET(15), 19)
+ MD4_STEP(MD4_F, a, b, c, d, MD4_SET(0), 3)
+ MD4_STEP(MD4_F, d, a, b, c, MD4_SET(1), 7)
+ MD4_STEP(MD4_F, c, d, a, b, MD4_SET(2), 11)
+ MD4_STEP(MD4_F, b, c, d, a, MD4_SET(3), 19)
+ MD4_STEP(MD4_F, a, b, c, d, MD4_SET(4), 3)
+ MD4_STEP(MD4_F, d, a, b, c, MD4_SET(5), 7)
+ MD4_STEP(MD4_F, c, d, a, b, MD4_SET(6), 11)
+ MD4_STEP(MD4_F, b, c, d, a, MD4_SET(7), 19)
+ MD4_STEP(MD4_F, a, b, c, d, MD4_SET(8), 3)
+ MD4_STEP(MD4_F, d, a, b, c, MD4_SET(9), 7)
+ MD4_STEP(MD4_F, c, d, a, b, MD4_SET(10), 11)
+ MD4_STEP(MD4_F, b, c, d, a, MD4_SET(11), 19)
+ MD4_STEP(MD4_F, a, b, c, d, MD4_SET(12), 3)
+ MD4_STEP(MD4_F, d, a, b, c, MD4_SET(13), 7)
+ MD4_STEP(MD4_F, c, d, a, b, MD4_SET(14), 11)
+ MD4_STEP(MD4_F, b, c, d, a, MD4_SET(15), 19)
/* Round 2 */
- STEP(G, a, b, c, d, GET(0) + 0x5a827999, 3)
- STEP(G, d, a, b, c, GET(4) + 0x5a827999, 5)
- STEP(G, c, d, a, b, GET(8) + 0x5a827999, 9)
- STEP(G, b, c, d, a, GET(12) + 0x5a827999, 13)
- STEP(G, a, b, c, d, GET(1) + 0x5a827999, 3)
- STEP(G, d, a, b, c, GET(5) + 0x5a827999, 5)
- STEP(G, c, d, a, b, GET(9) + 0x5a827999, 9)
- STEP(G, b, c, d, a, GET(13) + 0x5a827999, 13)
- STEP(G, a, b, c, d, GET(2) + 0x5a827999, 3)
- STEP(G, d, a, b, c, GET(6) + 0x5a827999, 5)
- STEP(G, c, d, a, b, GET(10) + 0x5a827999, 9)
- STEP(G, b, c, d, a, GET(14) + 0x5a827999, 13)
- STEP(G, a, b, c, d, GET(3) + 0x5a827999, 3)
- STEP(G, d, a, b, c, GET(7) + 0x5a827999, 5)
- STEP(G, c, d, a, b, GET(11) + 0x5a827999, 9)
- STEP(G, b, c, d, a, GET(15) + 0x5a827999, 13)
+ MD4_STEP(MD4_G, a, b, c, d, MD4_GET(0) + 0x5a827999, 3)
+ MD4_STEP(MD4_G, d, a, b, c, MD4_GET(4) + 0x5a827999, 5)
+ MD4_STEP(MD4_G, c, d, a, b, MD4_GET(8) + 0x5a827999, 9)
+ MD4_STEP(MD4_G, b, c, d, a, MD4_GET(12) + 0x5a827999, 13)
+ MD4_STEP(MD4_G, a, b, c, d, MD4_GET(1) + 0x5a827999, 3)
+ MD4_STEP(MD4_G, d, a, b, c, MD4_GET(5) + 0x5a827999, 5)
+ MD4_STEP(MD4_G, c, d, a, b, MD4_GET(9) + 0x5a827999, 9)
+ MD4_STEP(MD4_G, b, c, d, a, MD4_GET(13) + 0x5a827999, 13)
+ MD4_STEP(MD4_G, a, b, c, d, MD4_GET(2) + 0x5a827999, 3)
+ MD4_STEP(MD4_G, d, a, b, c, MD4_GET(6) + 0x5a827999, 5)
+ MD4_STEP(MD4_G, c, d, a, b, MD4_GET(10) + 0x5a827999, 9)
+ MD4_STEP(MD4_G, b, c, d, a, MD4_GET(14) + 0x5a827999, 13)
+ MD4_STEP(MD4_G, a, b, c, d, MD4_GET(3) + 0x5a827999, 3)
+ MD4_STEP(MD4_G, d, a, b, c, MD4_GET(7) + 0x5a827999, 5)
+ MD4_STEP(MD4_G, c, d, a, b, MD4_GET(11) + 0x5a827999, 9)
+ MD4_STEP(MD4_G, b, c, d, a, MD4_GET(15) + 0x5a827999, 13)
/* Round 3 */
- STEP(H, a, b, c, d, GET(0) + 0x6ed9eba1, 3)
- STEP(H, d, a, b, c, GET(8) + 0x6ed9eba1, 9)
- STEP(H, c, d, a, b, GET(4) + 0x6ed9eba1, 11)
- STEP(H, b, c, d, a, GET(12) + 0x6ed9eba1, 15)
- STEP(H, a, b, c, d, GET(2) + 0x6ed9eba1, 3)
- STEP(H, d, a, b, c, GET(10) + 0x6ed9eba1, 9)
- STEP(H, c, d, a, b, GET(6) + 0x6ed9eba1, 11)
- STEP(H, b, c, d, a, GET(14) + 0x6ed9eba1, 15)
- STEP(H, a, b, c, d, GET(1) + 0x6ed9eba1, 3)
- STEP(H, d, a, b, c, GET(9) + 0x6ed9eba1, 9)
- STEP(H, c, d, a, b, GET(5) + 0x6ed9eba1, 11)
- STEP(H, b, c, d, a, GET(13) + 0x6ed9eba1, 15)
- STEP(H, a, b, c, d, GET(3) + 0x6ed9eba1, 3)
- STEP(H, d, a, b, c, GET(11) + 0x6ed9eba1, 9)
- STEP(H, c, d, a, b, GET(7) + 0x6ed9eba1, 11)
- STEP(H, b, c, d, a, GET(15) + 0x6ed9eba1, 15)
+ MD4_STEP(MD4_H, a, b, c, d, MD4_GET(0) + 0x6ed9eba1, 3)
+ MD4_STEP(MD4_H, d, a, b, c, MD4_GET(8) + 0x6ed9eba1, 9)
+ MD4_STEP(MD4_H, c, d, a, b, MD4_GET(4) + 0x6ed9eba1, 11)
+ MD4_STEP(MD4_H, b, c, d, a, MD4_GET(12) + 0x6ed9eba1, 15)
+ MD4_STEP(MD4_H, a, b, c, d, MD4_GET(2) + 0x6ed9eba1, 3)
+ MD4_STEP(MD4_H, d, a, b, c, MD4_GET(10) + 0x6ed9eba1, 9)
+ MD4_STEP(MD4_H, c, d, a, b, MD4_GET(6) + 0x6ed9eba1, 11)
+ MD4_STEP(MD4_H, b, c, d, a, MD4_GET(14) + 0x6ed9eba1, 15)
+ MD4_STEP(MD4_H, a, b, c, d, MD4_GET(1) + 0x6ed9eba1, 3)
+ MD4_STEP(MD4_H, d, a, b, c, MD4_GET(9) + 0x6ed9eba1, 9)
+ MD4_STEP(MD4_H, c, d, a, b, MD4_GET(5) + 0x6ed9eba1, 11)
+ MD4_STEP(MD4_H, b, c, d, a, MD4_GET(13) + 0x6ed9eba1, 15)
+ MD4_STEP(MD4_H, a, b, c, d, MD4_GET(3) + 0x6ed9eba1, 3)
+ MD4_STEP(MD4_H, d, a, b, c, MD4_GET(11) + 0x6ed9eba1, 9)
+ MD4_STEP(MD4_H, c, d, a, b, MD4_GET(7) + 0x6ed9eba1, 11)
+ MD4_STEP(MD4_H, b, c, d, a, MD4_GET(15) + 0x6ed9eba1, 15)
a += saved_a;
b += saved_b;
@@ -442,11 +446,11 @@ static void MD4_Update(MD4_CTX *ctx, const void *data, unsigned long size)
memcpy(&ctx->buffer[used], data, available);
data = (const unsigned char *)data + available;
size -= available;
- body(ctx, ctx->buffer, 64);
+ my_md4_body(ctx, ctx->buffer, 64);
}
if(size >= 64) {
- data = body(ctx, data, size & ~(unsigned long)0x3f);
+ data = my_md4_body(ctx, data, size & ~(unsigned long)0x3f);
size &= 0x3f;
}
@@ -465,7 +469,7 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
if(available < 8) {
memset(&ctx->buffer[used], 0, available);
- body(ctx, ctx->buffer, 64);
+ my_md4_body(ctx, ctx->buffer, 64);
used = 0;
available = 64;
}
@@ -482,7 +486,7 @@ static void MD4_Final(unsigned char *result, MD4_CTX *ctx)
ctx->buffer[62] = curlx_ultouc((ctx->hi >> 16)&0xff);
ctx->buffer[63] = curlx_ultouc(ctx->hi >> 24);
- body(ctx, ctx->buffer, 64);
+ my_md4_body(ctx, ctx->buffer, 64);
result[0] = curlx_ultouc((ctx->a)&0xff);
result[1] = curlx_ultouc((ctx->a >> 8)&0xff);
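The renames above (MD4_F/MD4_G/MD4_H, MD4_STEP, MD4_SET/MD4_GET, my_md4_body) give the fallback MD4 code its own prefix so its macros no longer collide with the identically named ones in md5.c when both files land in one translation unit. What the renamed macros compute, written out as plain functions (a sketch, not the patch's code):

#include <stdint.h>
#include <stdio.h>

/* The three MD4 round functions, as functions instead of macros. */
static uint32_t md4_f(uint32_t x, uint32_t y, uint32_t z)
{ return z ^ (x & (y ^ z)); }           /* x selects between y and z */
static uint32_t md4_g(uint32_t x, uint32_t y, uint32_t z)
{ return (x & (y | z)) | (y & z); }     /* majority of x, y, z */
static uint32_t md4_h(uint32_t x, uint32_t y, uint32_t z)
{ return x ^ y ^ z; }                   /* parity */

/* One MD4_STEP: a += f(b, c, d) + x, then rotate left by s bits. */
static uint32_t md4_step(uint32_t (*f)(uint32_t, uint32_t, uint32_t),
                         uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                         uint32_t x, unsigned s)
{
  a += f(b, c, d) + x;
  return (a << s) | (a >> (32 - s));
}

int main(void)
{
  /* MD4 initial state words, run through a single round-1 step */
  uint32_t a = 0x67452301, b = 0xefcdab89, c = 0x98badcfe, d = 0x10325476;
  printf("%08x\n", md4_step(md4_f, a, b, c, d, 0, 3));
  return 0;
}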
diff --git a/libs/libcurl/src/md5.c b/libs/libcurl/src/md5.c
index 9e68d2d98c..6083db9e69 100644
--- a/libs/libcurl/src/md5.c
+++ b/libs/libcurl/src/md5.c
@@ -172,7 +172,7 @@ static void my_md5_final(unsigned char *digest, my_md5_ctx *ctx)
/* For Apple operating systems: CommonCrypto has the functions we need.
These functions are available on Tiger and later, as well as iOS 2.0
- and later. If you're building for an older cat, well, sorry.
+ and later. If you are building for an older cat, well, sorry.
Declaring the functions as static like this seems to be a bit more
reliable than defining COMMON_DIGEST_FOR_OPENSSL on older cats. */
@@ -254,7 +254,7 @@ static void my_md5_final(unsigned char *digest, my_md5_ctx *ctx)
* Author:
* Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
*
- * This software was written by Alexander Peslyak in 2001. No copyright is
+ * This software was written by Alexander Peslyak in 2001. No copyright is
* claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
@@ -264,19 +264,19 @@ static void my_md5_final(unsigned char *digest, my_md5_ctx *ctx)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
- * There's ABSOLUTELY NO WARRANTY, express or implied.
+ * There is ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
- * unsigned integer data type will do), there's no compile-time endianness
- * configuration, and the function prototypes match OpenSSL's. No code from
+ * unsigned integer data type will do), there is no compile-time endianness
+ * configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
*
* The primary goals of this implementation are portability and ease of use.
- * It is meant to be fast, but not as fast as possible. Some known
+ * It is meant to be fast, but not as fast as possible. Some known
* optimizations are not included to reduce source code size and avoid
* compile-time configuration.
*/
@@ -304,16 +304,16 @@ static void my_md5_final(unsigned char *result, my_md5_ctx *ctx);
* architectures that lack an AND-NOT instruction, just like in Colin Plumb's
* implementation.
*/
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
-#define H(x, y, z) (((x) ^ (y)) ^ (z))
-#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
-#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+#define MD5_F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define MD5_G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define MD5_H(x, y, z) (((x) ^ (y)) ^ (z))
+#define MD5_H2(x, y, z) ((x) ^ ((y) ^ (z)))
+#define MD5_I(x, y, z) ((y) ^ ((x) | ~(z)))
/*
* The MD5 transformation for all four rounds.
*/
-#define STEP(f, a, b, c, d, x, t, s) \
+#define MD5_STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
@@ -323,30 +323,31 @@ static void my_md5_final(unsigned char *result, my_md5_ctx *ctx);
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
- * memory accesses is just an optimization. Nothing will break if it
- * doesn't work.
+ * memory accesses is just an optimization. Nothing will break if it
+ * does not work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
-#define SET(n) \
+#define MD5_SET(n) \
(*(MD5_u32plus *)(void *)&ptr[(n) * 4])
-#define GET(n) \
- SET(n)
+#define MD5_GET(n) \
+ MD5_SET(n)
#else
-#define SET(n) \
+#define MD5_SET(n) \
(ctx->block[(n)] = \
(MD5_u32plus)ptr[(n) * 4] | \
((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
-#define GET(n) \
+#define MD5_GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
- * the bit counters. There are no alignment requirements.
+ * the bit counters. There are no alignment requirements.
*/
-static const void *body(my_md5_ctx *ctx, const void *data, unsigned long size)
+static const void *my_md5_body(my_md5_ctx *ctx,
+ const void *data, unsigned long size)
{
const unsigned char *ptr;
MD5_u32plus a, b, c, d;
@@ -367,76 +368,76 @@ static const void *body(my_md5_ctx *ctx, const void *data, unsigned long size)
saved_d = d;
/* Round 1 */
- STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
- STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
- STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
- STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
- STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
- STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
- STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
- STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
- STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
- STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
- STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
- STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
- STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
- STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
- STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
- STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+ MD5_STEP(MD5_F, a, b, c, d, MD5_SET(0), 0xd76aa478, 7)
+ MD5_STEP(MD5_F, d, a, b, c, MD5_SET(1), 0xe8c7b756, 12)
+ MD5_STEP(MD5_F, c, d, a, b, MD5_SET(2), 0x242070db, 17)
+ MD5_STEP(MD5_F, b, c, d, a, MD5_SET(3), 0xc1bdceee, 22)
+ MD5_STEP(MD5_F, a, b, c, d, MD5_SET(4), 0xf57c0faf, 7)
+ MD5_STEP(MD5_F, d, a, b, c, MD5_SET(5), 0x4787c62a, 12)
+ MD5_STEP(MD5_F, c, d, a, b, MD5_SET(6), 0xa8304613, 17)
+ MD5_STEP(MD5_F, b, c, d, a, MD5_SET(7), 0xfd469501, 22)
+ MD5_STEP(MD5_F, a, b, c, d, MD5_SET(8), 0x698098d8, 7)
+ MD5_STEP(MD5_F, d, a, b, c, MD5_SET(9), 0x8b44f7af, 12)
+ MD5_STEP(MD5_F, c, d, a, b, MD5_SET(10), 0xffff5bb1, 17)
+ MD5_STEP(MD5_F, b, c, d, a, MD5_SET(11), 0x895cd7be, 22)
+ MD5_STEP(MD5_F, a, b, c, d, MD5_SET(12), 0x6b901122, 7)
+ MD5_STEP(MD5_F, d, a, b, c, MD5_SET(13), 0xfd987193, 12)
+ MD5_STEP(MD5_F, c, d, a, b, MD5_SET(14), 0xa679438e, 17)
+ MD5_STEP(MD5_F, b, c, d, a, MD5_SET(15), 0x49b40821, 22)
/* Round 2 */
- STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
- STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
- STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
- STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
- STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
- STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
- STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
- STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
- STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
- STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
- STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
- STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
- STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
- STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
- STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
- STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+ MD5_STEP(MD5_G, a, b, c, d, MD5_GET(1), 0xf61e2562, 5)
+ MD5_STEP(MD5_G, d, a, b, c, MD5_GET(6), 0xc040b340, 9)
+ MD5_STEP(MD5_G, c, d, a, b, MD5_GET(11), 0x265e5a51, 14)
+ MD5_STEP(MD5_G, b, c, d, a, MD5_GET(0), 0xe9b6c7aa, 20)
+ MD5_STEP(MD5_G, a, b, c, d, MD5_GET(5), 0xd62f105d, 5)
+ MD5_STEP(MD5_G, d, a, b, c, MD5_GET(10), 0x02441453, 9)
+ MD5_STEP(MD5_G, c, d, a, b, MD5_GET(15), 0xd8a1e681, 14)
+ MD5_STEP(MD5_G, b, c, d, a, MD5_GET(4), 0xe7d3fbc8, 20)
+ MD5_STEP(MD5_G, a, b, c, d, MD5_GET(9), 0x21e1cde6, 5)
+ MD5_STEP(MD5_G, d, a, b, c, MD5_GET(14), 0xc33707d6, 9)
+ MD5_STEP(MD5_G, c, d, a, b, MD5_GET(3), 0xf4d50d87, 14)
+ MD5_STEP(MD5_G, b, c, d, a, MD5_GET(8), 0x455a14ed, 20)
+ MD5_STEP(MD5_G, a, b, c, d, MD5_GET(13), 0xa9e3e905, 5)
+ MD5_STEP(MD5_G, d, a, b, c, MD5_GET(2), 0xfcefa3f8, 9)
+ MD5_STEP(MD5_G, c, d, a, b, MD5_GET(7), 0x676f02d9, 14)
+ MD5_STEP(MD5_G, b, c, d, a, MD5_GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
- STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
- STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
- STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
- STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
- STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
- STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
- STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
- STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
- STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
- STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
- STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
- STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
- STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
- STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
- STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
- STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
+ MD5_STEP(MD5_H, a, b, c, d, MD5_GET(5), 0xfffa3942, 4)
+ MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(8), 0x8771f681, 11)
+ MD5_STEP(MD5_H, c, d, a, b, MD5_GET(11), 0x6d9d6122, 16)
+ MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(14), 0xfde5380c, 23)
+ MD5_STEP(MD5_H, a, b, c, d, MD5_GET(1), 0xa4beea44, 4)
+ MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(4), 0x4bdecfa9, 11)
+ MD5_STEP(MD5_H, c, d, a, b, MD5_GET(7), 0xf6bb4b60, 16)
+ MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(10), 0xbebfbc70, 23)
+ MD5_STEP(MD5_H, a, b, c, d, MD5_GET(13), 0x289b7ec6, 4)
+ MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(0), 0xeaa127fa, 11)
+ MD5_STEP(MD5_H, c, d, a, b, MD5_GET(3), 0xd4ef3085, 16)
+ MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(6), 0x04881d05, 23)
+ MD5_STEP(MD5_H, a, b, c, d, MD5_GET(9), 0xd9d4d039, 4)
+ MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(12), 0xe6db99e5, 11)
+ MD5_STEP(MD5_H, c, d, a, b, MD5_GET(15), 0x1fa27cf8, 16)
+ MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(2), 0xc4ac5665, 23)
/* Round 4 */
- STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
- STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
- STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
- STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
- STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
- STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
- STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
- STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
- STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
- STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
- STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
- STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
- STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
- STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
- STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
- STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+ MD5_STEP(MD5_I, a, b, c, d, MD5_GET(0), 0xf4292244, 6)
+ MD5_STEP(MD5_I, d, a, b, c, MD5_GET(7), 0x432aff97, 10)
+ MD5_STEP(MD5_I, c, d, a, b, MD5_GET(14), 0xab9423a7, 15)
+ MD5_STEP(MD5_I, b, c, d, a, MD5_GET(5), 0xfc93a039, 21)
+ MD5_STEP(MD5_I, a, b, c, d, MD5_GET(12), 0x655b59c3, 6)
+ MD5_STEP(MD5_I, d, a, b, c, MD5_GET(3), 0x8f0ccc92, 10)
+ MD5_STEP(MD5_I, c, d, a, b, MD5_GET(10), 0xffeff47d, 15)
+ MD5_STEP(MD5_I, b, c, d, a, MD5_GET(1), 0x85845dd1, 21)
+ MD5_STEP(MD5_I, a, b, c, d, MD5_GET(8), 0x6fa87e4f, 6)
+ MD5_STEP(MD5_I, d, a, b, c, MD5_GET(15), 0xfe2ce6e0, 10)
+ MD5_STEP(MD5_I, c, d, a, b, MD5_GET(6), 0xa3014314, 15)
+ MD5_STEP(MD5_I, b, c, d, a, MD5_GET(13), 0x4e0811a1, 21)
+ MD5_STEP(MD5_I, a, b, c, d, MD5_GET(4), 0xf7537e82, 6)
+ MD5_STEP(MD5_I, d, a, b, c, MD5_GET(11), 0xbd3af235, 10)
+ MD5_STEP(MD5_I, c, d, a, b, MD5_GET(2), 0x2ad7d2bb, 15)
+ MD5_STEP(MD5_I, b, c, d, a, MD5_GET(9), 0xeb86d391, 21)
a += saved_a;
b += saved_b;
@@ -492,11 +493,11 @@ static void my_md5_update(my_md5_ctx *ctx, const void *data,
memcpy(&ctx->buffer[used], data, available);
data = (const unsigned char *)data + available;
size -= available;
- body(ctx, ctx->buffer, 64);
+ my_md5_body(ctx, ctx->buffer, 64);
}
if(size >= 64) {
- data = body(ctx, data, size & ~(unsigned long)0x3f);
+ data = my_md5_body(ctx, data, size & ~(unsigned long)0x3f);
size &= 0x3f;
}
@@ -515,7 +516,7 @@ static void my_md5_final(unsigned char *result, my_md5_ctx *ctx)
if(available < 8) {
memset(&ctx->buffer[used], 0, available);
- body(ctx, ctx->buffer, 64);
+ my_md5_body(ctx, ctx->buffer, 64);
used = 0;
available = 64;
}
@@ -532,7 +533,7 @@ static void my_md5_final(unsigned char *result, my_md5_ctx *ctx)
ctx->buffer[62] = curlx_ultouc((ctx->hi >> 16)&0xff);
ctx->buffer[63] = curlx_ultouc(ctx->hi >> 24);
- body(ctx, ctx->buffer, 64);
+ my_md5_body(ctx, ctx->buffer, 64);
result[0] = curlx_ultouc((ctx->a)&0xff);
result[1] = curlx_ultouc((ctx->a >> 8)&0xff);
diff --git a/libs/libcurl/src/memdebug.c b/libs/libcurl/src/memdebug.c
index 5d1bb4ad55..14a3911e03 100644
--- a/libs/libcurl/src/memdebug.c
+++ b/libs/libcurl/src/memdebug.c
@@ -30,7 +30,7 @@
#include "urldata.h"
-#define MEMDEBUG_NODEFINES /* don't redefine the standard functions */
+#define MEMDEBUG_NODEFINES /* do not redefine the standard functions */
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@@ -44,8 +44,8 @@ struct memdebug {
double d;
void *p;
} mem[1];
- /* I'm hoping this is the thing with the strictest alignment
- * requirements. That also means we waste some space :-( */
+ /* I am hoping this is the thing with the strictest alignment
+ * requirements. That also means we waste some space :-( */
};
/*
@@ -53,7 +53,7 @@ struct memdebug {
* remain so. For advanced analysis, record a log file and write perl scripts
* to analyze them!
*
- * Don't use these with multithreaded test programs!
+ * Do not use these with multithreaded test programs!
*/
FILE *curl_dbg_logfile = NULL;
@@ -75,7 +75,7 @@ static void curl_dbg_cleanup(void)
curl_dbg_logfile = NULL;
}
-/* this sets the log file name */
+/* this sets the log filename */
void curl_dbg_memdebug(const char *logname)
{
if(!curl_dbg_logfile) {
@@ -84,7 +84,7 @@ void curl_dbg_memdebug(const char *logname)
else
curl_dbg_logfile = stderr;
#ifdef MEMDEBUG_LOG_SYNC
- /* Flush the log file after every line so the log isn't lost in a crash */
+ /* Flush the log file after every line so the log is not lost in a crash */
if(curl_dbg_logfile)
setbuf(curl_dbg_logfile, (char *)NULL);
#endif
@@ -103,7 +103,7 @@ void curl_dbg_memlimit(long limit)
}
}
-/* returns TRUE if this isn't allowed! */
+/* returns TRUE if this is not allowed! */
static bool countcheck(const char *func, int line, const char *source)
{
/* if source is NULL, then the call is made internally and this check
diff --git a/libs/libcurl/src/memdebug.h b/libs/libcurl/src/memdebug.h
index c3c81bad2c..23f378643e 100644
--- a/libs/libcurl/src/memdebug.h
+++ b/libs/libcurl/src/memdebug.h
@@ -137,13 +137,14 @@ CURL_EXTERN int curl_dbg_fclose(FILE *file, int line, const char *source);
#undef socket
#define socket(domain,type,protocol)\
- curl_dbg_socket(domain, type, protocol, __LINE__, __FILE__)
+ curl_dbg_socket((int)domain, type, protocol, __LINE__, __FILE__)
#undef accept /* for those with accept as a macro */
#define accept(sock,addr,len)\
curl_dbg_accept(sock, addr, len, __LINE__, __FILE__)
#ifdef HAVE_SOCKETPAIR
#define socketpair(domain,type,protocol,socket_vector)\
- curl_dbg_socketpair(domain, type, protocol, socket_vector, __LINE__, __FILE__)
+ curl_dbg_socketpair((int)domain, type, protocol, socket_vector, \
+ __LINE__, __FILE__)
#endif
#ifdef HAVE_GETADDRINFO
diff --git a/libs/libcurl/src/mime.c b/libs/libcurl/src/mime.c
index c52fee5207..f3ba1f929d 100644
--- a/libs/libcurl/src/mime.c
+++ b/libs/libcurl/src/mime.c
@@ -1137,7 +1137,7 @@ static void cleanup_part_content(curl_mimepart *part)
part->datasize = (curl_off_t) 0; /* No size yet. */
cleanup_encoder_state(&part->encstate);
part->kind = MIMEKIND_NONE;
- part->flags &= ~MIME_FAST_READ;
+ part->flags &= ~(unsigned int)MIME_FAST_READ;
part->lastreadstatus = 1; /* Successful read status. */
part->state.state = MIMESTATE_BEGIN;
}
@@ -1147,7 +1147,7 @@ static void mime_subparts_free(void *ptr)
curl_mime *mime = (curl_mime *) ptr;
if(mime && mime->parent) {
- mime->parent->freefunc = NULL; /* Be sure we won't be called again. */
+ mime->parent->freefunc = NULL; /* Be sure we will not be called again. */
cleanup_part_content(mime->parent); /* Avoid dangling pointer in part. */
}
curl_mime_free(mime);
@@ -1159,7 +1159,7 @@ static void mime_subparts_unbind(void *ptr)
curl_mime *mime = (curl_mime *) ptr;
if(mime && mime->parent) {
- mime->parent->freefunc = NULL; /* Be sure we won't be called again. */
+ mime->parent->freefunc = NULL; /* Be sure we will not be called again. */
cleanup_part_content(mime->parent); /* Avoid dangling pointer in part. */
mime->parent = NULL;
}
@@ -1186,7 +1186,7 @@ void curl_mime_free(curl_mime *mime)
curl_mimepart *part;
if(mime) {
- mime_subparts_unbind(mime); /* Be sure it's not referenced anymore. */
+ mime_subparts_unbind(mime); /* Be sure it is not referenced anymore. */
while(mime->firstpart) {
part = mime->firstpart;
mime->firstpart = part->nextpart;
@@ -1354,7 +1354,7 @@ CURLcode curl_mime_name(curl_mimepart *part, const char *name)
return CURLE_OK;
}
-/* Set mime part remote file name. */
+/* Set mime part remote filename. */
CURLcode curl_mime_filename(curl_mimepart *part, const char *filename)
{
if(!part)
@@ -1497,7 +1497,7 @@ CURLcode curl_mime_headers(curl_mimepart *part,
if(part->flags & MIME_USERHEADERS_OWNER) {
if(part->userheaders != headers) /* Allow setting twice the same list. */
curl_slist_free_all(part->userheaders);
- part->flags &= ~MIME_USERHEADERS_OWNER;
+ part->flags &= ~(unsigned int)MIME_USERHEADERS_OWNER;
}
part->userheaders = headers;
if(headers && take_ownership)
@@ -1554,7 +1554,7 @@ CURLcode Curl_mime_set_subparts(curl_mimepart *part,
while(root->parent && root->parent->parent)
root = root->parent->parent;
if(subparts == root) {
- /* Can't add as a subpart of itself. */
+      /* Cannot add as a subpart of itself. */
return CURLE_BAD_FUNCTION_ARGUMENT;
}
}
@@ -1662,7 +1662,8 @@ static curl_off_t mime_size(curl_mimepart *part)
if(size >= 0 && !(part->flags & MIME_BODY_ONLY)) {
/* Compute total part size. */
size += slist_size(part->curlheaders, 2, NULL, 0);
- size += slist_size(part->userheaders, 2, STRCONST("Content-Type"));
+ size += slist_size(part->userheaders, 2,
+ STRCONST("Content-Type"));
size += 2; /* CRLF after headers. */
}
return size;
@@ -1770,7 +1771,7 @@ CURLcode Curl_mime_prepare_headers(struct Curl_easy *data,
curl_slist_free_all(part->curlheaders);
part->curlheaders = NULL;
- /* Be sure we won't access old headers later. */
+ /* Be sure we will not access old headers later. */
if(part->state.state == MIMESTATE_CURLHEADERS)
mimesetstate(&part->state, MIMESTATE_CURLHEADERS, NULL);
@@ -2071,7 +2072,7 @@ static CURLcode cr_mime_resume_from(struct Curl_easy *data,
return CURLE_PARTIAL_FILE;
}
}
- /* we've passed, proceed as normal */
+ /* we have passed, proceed as normal */
}
return CURLE_OK;
}
@@ -2095,6 +2096,14 @@ static CURLcode cr_mime_unpause(struct Curl_easy *data,
return CURLE_OK;
}
+static bool cr_mime_is_paused(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_mime_ctx *ctx = reader->ctx;
+ (void)data;
+ return (ctx->part && ctx->part->lastreadstatus == CURL_READFUNC_PAUSE);
+}
+
static const struct Curl_crtype cr_mime = {
"cr-mime",
cr_mime_init,
@@ -2105,6 +2114,7 @@ static const struct Curl_crtype cr_mime = {
cr_mime_resume_from,
cr_mime_rewind,
cr_mime_unpause,
+ cr_mime_is_paused,
Curl_creader_def_done,
sizeof(struct cr_mime_ctx)
};
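
cr_mime_is_paused() is the new entry in the client-reader vtable above: it reports whether the mime part's read callback last returned CURL_READFUNC_PAUSE, presumably so the transfer code can skip work for paused uploads (see the Curl_xfer_is_blocked() check in multi.c further down). Seen from the application this is ordinary libcurl pause handling; a sketch using only documented public API, assuming an easy handle easy and a curl_mime mime are already set up elsewhere:

#include <curl/curl.h>
#include <string.h>

/* Read callback for a mime data part: pause on the first call, then hand out
   the 5 bytes announced below. The CURL_READFUNC_PAUSE return is what ends up
   as part->lastreadstatus and what cr_mime_is_paused() looks at. */
static size_t part_read(char *buffer, size_t size, size_t nitems, void *arg)
{
  int *paused_once = arg;
  if(!*paused_once) {
    *paused_once = 1;
    return CURL_READFUNC_PAUSE;
  }
  if(size * nitems < 5)
    return CURL_READFUNC_ABORT;
  memcpy(buffer, "hello", 5);
  return 5;
}

/* setup sketch:
   curl_mimepart *part = curl_mime_addpart(mime);
   curl_mime_data_cb(part, 5, part_read, NULL, NULL, &paused_once);
   ... and later, when the application wants the upload to continue:
   curl_easy_pause(easy, CURLPAUSE_CONT);                             */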
diff --git a/libs/libcurl/src/mime.h b/libs/libcurl/src/mime.h
index edb2ee350d..1649d9d2d1 100644
--- a/libs/libcurl/src/mime.h
+++ b/libs/libcurl/src/mime.h
@@ -112,7 +112,7 @@ struct curl_mimepart {
curl_mimepart *nextpart; /* Forward linked list. */
enum mimekind kind; /* The part kind. */
unsigned int flags; /* Flags. */
- char *data; /* Memory data or file name. */
+ char *data; /* Memory data or filename. */
curl_read_callback readfunc; /* Read function. */
curl_seek_callback seekfunc; /* Seek function. */
curl_free_callback freefunc; /* Argument free function. */
@@ -121,7 +121,7 @@ struct curl_mimepart {
struct curl_slist *curlheaders; /* Part headers. */
struct curl_slist *userheaders; /* Part headers. */
char *mimetype; /* Part mime type. */
- char *filename; /* Remote file name. */
+ char *filename; /* Remote filename. */
char *name; /* Data name. */
curl_off_t datasize; /* Expected data size. */
struct mime_state state; /* Current readback state. */
diff --git a/libs/libcurl/src/mprintf.c b/libs/libcurl/src/mprintf.c
index 21d62fa654..24e7981f1a 100644
--- a/libs/libcurl/src/mprintf.c
+++ b/libs/libcurl/src/mprintf.c
@@ -863,7 +863,7 @@ number:
str = (char *)iptr->val.str;
if(!str) {
- /* Write null string if there's space. */
+ /* Write null string if there is space. */
if(prec == -1 || prec >= (int) sizeof(nilstr) - 1) {
str = nilstr;
len = sizeof(nilstr) - 1;
@@ -1040,7 +1040,7 @@ static int addbyter(unsigned char outc, void *f)
{
struct nsprintf *infop = f;
if(infop->length < infop->max) {
- /* only do this if we haven't reached max length yet */
+ /* only do this if we have not reached max length yet */
*infop->buffer++ = (char)outc; /* store */
infop->length++; /* we are now one byte larger */
return 0; /* fputc() returns like this on success */
@@ -1062,10 +1062,10 @@ int curl_mvsnprintf(char *buffer, size_t maxlength, const char *format,
if(info.max) {
/* we terminate this with a zero byte */
if(info.max == info.length) {
- /* we're at maximum, scrap the last letter */
+ /* we are at maximum, scrap the last letter */
info.buffer[-1] = 0;
DEBUGASSERT(retcode);
- retcode--; /* don't count the nul byte */
+ retcode--; /* do not count the nul byte */
}
else
info.buffer[0] = 0;
diff --git a/libs/libcurl/src/mqtt.c b/libs/libcurl/src/mqtt.c
index 9cd56781a5..83d49ce707 100644
--- a/libs/libcurl/src/mqtt.c
+++ b/libs/libcurl/src/mqtt.c
@@ -154,15 +154,15 @@ static int mqtt_getsock(struct Curl_easy *data,
static int mqtt_encode_len(char *buf, size_t len)
{
- unsigned char encoded;
int i;
for(i = 0; (len > 0) && (i<4); i++) {
+ unsigned char encoded;
encoded = len % 0x80;
len /= 0x80;
if(len)
encoded |= 0x80;
- buf[i] = encoded;
+ buf[i] = (char)encoded;
}
return i;
@@ -312,7 +312,7 @@ static CURLcode mqtt_connect(struct Curl_easy *data)
start_user = pos + 3 + MQTT_CLIENTID_LEN;
/* position where starts the password payload */
start_pwd = start_user + ulen;
- /* if user name was provided, add it to the packet */
+ /* if username was provided, add it to the packet */
if(ulen) {
start_pwd += 2;
@@ -585,7 +585,7 @@ static size_t mqtt_decode_len(unsigned char *buf,
return len;
}
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
static const char *statenames[]={
"MQTT_FIRST",
"MQTT_REMAINING_LENGTH",
@@ -606,7 +606,7 @@ static void mqstate(struct Curl_easy *data,
{
struct connectdata *conn = data->conn;
struct mqtt_conn *mqtt = &conn->proto.mqtt;
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
infof(data, "%s (from %s) (next is %s)",
statenames[state],
statenames[mqtt->state],
@@ -743,7 +743,7 @@ static CURLcode mqtt_doing(struct Curl_easy *data, bool *done)
struct mqtt_conn *mqtt = &conn->proto.mqtt;
struct MQTT *mq = data->req.p.mqtt;
ssize_t nread;
- unsigned char byte;
+ unsigned char recvbyte;
*done = FALSE;
@@ -776,13 +776,13 @@ static CURLcode mqtt_doing(struct Curl_easy *data, bool *done)
FALLTHROUGH();
case MQTT_REMAINING_LENGTH:
do {
- result = Curl_xfer_recv(data, (char *)&byte, 1, &nread);
+ result = Curl_xfer_recv(data, (char *)&recvbyte, 1, &nread);
if(result || !nread)
break;
- Curl_debug(data, CURLINFO_HEADER_IN, (char *)&byte, 1);
- mq->pkt_hd[mq->npacket++] = byte;
- } while((byte & 0x80) && (mq->npacket < 4));
- if(!result && nread && (byte & 0x80))
+ Curl_debug(data, CURLINFO_HEADER_IN, (char *)&recvbyte, 1);
+ mq->pkt_hd[mq->npacket++] = recvbyte;
+ } while((recvbyte & 0x80) && (mq->npacket < 4));
+ if(!result && nread && (recvbyte & 0x80))
/* MQTT supports up to 127 * 128^0 + 127 * 128^1 + 127 * 128^2 +
127 * 128^3 bytes. server tried to send more */
result = CURLE_WEIRD_SERVER_REPLY;
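
mqtt_encode_len() and mqtt_decode_len() implement MQTT's "remaining length" field: base-128 digits, least significant first, with bit 0x80 as a continuation flag and at most four bytes, hence the 127 * 128^0 + ... + 127 * 128^3 (268435455) ceiling checked above. A standalone round-trip sketch of the same scheme (not curl code):

#include <stdio.h>
#include <stddef.h>

/* Encode len as MQTT remaining-length digits; returns the bytes written (1-4) */
static int rl_encode(unsigned char *buf, size_t len)
{
  int i = 0;
  do {
    unsigned char digit = (unsigned char)(len % 128);
    len /= 128;
    if(len)
      digit |= 0x80;                   /* more digits follow */
    buf[i++] = digit;
  } while(len && i < 4);
  return i;
}

/* Decode the digits again; *used reports how many header bytes were consumed */
static size_t rl_decode(const unsigned char *buf, size_t *used)
{
  size_t value = 0, mult = 1, i = 0;
  do {
    value += (size_t)(buf[i] & 0x7f) * mult;
    mult *= 128;
  } while((buf[i++] & 0x80) && i < 4);
  *used = i;
  return value;
}

int main(void)
{
  unsigned char buf[4];
  size_t used;
  int n = rl_encode(buf, 321);         /* encodes as 0xC1 0x02 */
  printf("%d bytes, decodes to %zu\n", n, rl_decode(buf, &used));
  return 0;
}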
diff --git a/libs/libcurl/src/multi.c b/libs/libcurl/src/multi.c
index c388a3d8d8..fd2ee5238f 100644
--- a/libs/libcurl/src/multi.c
+++ b/libs/libcurl/src/multi.c
@@ -57,7 +57,7 @@
/*
CURL_SOCKET_HASH_TABLE_SIZE should be a prime number. Increasing it from 97
- to 911 takes on a 32-bit machine 4 x 804 = 3211 more bytes. Still, every
+ to 911 takes on a 32-bit machine 4 x 804 = 3211 more bytes. Still, every
CURL handle takes 45-50 K memory, therefore this 3K are not significant.
*/
#ifndef CURL_SOCKET_HASH_TABLE_SIZE
@@ -135,7 +135,7 @@ static void init_completed(struct Curl_easy *data)
{
/* this is a completed transfer */
- /* Important: reset the conn pointer so that we don't point to memory
+ /* Important: reset the conn pointer so that we do not point to memory
that could be freed anytime */
Curl_detach_connection(data);
Curl_expire_clear(data); /* stop all timers */
@@ -175,7 +175,7 @@ static void mstate(struct Curl_easy *data, CURLMstate state
#endif
if(oldstate == state)
- /* don't bother when the new state is the same as the old state */
+ /* do not bother when the new state is the same as the old state */
return;
data->mstate = state;
@@ -191,7 +191,7 @@ static void mstate(struct Curl_easy *data, CURLMstate state
#endif
if(state == MSTATE_COMPLETED) {
- /* changing to COMPLETED means there's one less easy handle 'alive' */
+ /* changing to COMPLETED means there is one less easy handle 'alive' */
DEBUGASSERT(data->multi->num_alive > 0);
data->multi->num_alive--;
if(!data->multi->num_alive) {
@@ -343,7 +343,7 @@ static size_t hash_fd(void *key, size_t key_length, size_t slots_num)
curl_socket_t fd = *((curl_socket_t *) key);
(void) key_length;
- return (fd % slots_num);
+ return (fd % (curl_socket_t)slots_num);
}
/*
@@ -354,12 +354,12 @@ static size_t hash_fd(void *key, size_t key_length, size_t slots_num)
* "Some tests at 7000 and 9000 connections showed that the socket hash lookup
* is somewhat of a bottle neck. Its current implementation may be a bit too
* limiting. It simply has a fixed-size array, and on each entry in the array
- * it has a linked list with entries. So the hash only checks which list to
- * scan through. The code I had used so for used a list with merely 7 slots
- * (as that is what the DNS hash uses) but with 7000 connections that would
- * make an average of 1000 nodes in each list to run through. I upped that to
- * 97 slots (I believe a prime is suitable) and noticed a significant speed
- * increase. I need to reconsider the hash implementation or use a rather
+ * it has a linked list with entries. The hash only checks which list to scan
+ * through. The code I had used so for used a list with merely 7 slots (as
+ * that is what the DNS hash uses) but with 7000 connections that would make
+ * an average of 1000 nodes in each list to run through. I upped that to 97
+ * slots (I believe a prime is suitable) and noticed a significant speed
+ * increase. I need to reconsider the hash implementation or use a rather
* large default value like this. At 9000 connections I was still below 10us
* per call."
*
@@ -370,6 +370,17 @@ static void sh_init(struct Curl_hash *hash, size_t hashsize)
sh_freeentry);
}
+/* multi->proto_hash destructor. Should never be called as elements
+ * MUST be added with their own destructor */
+static void ph_freeentry(void *p)
+{
+ (void)p;
+ /* Will always be FALSE. Cannot use a 0 assert here since compilers
+ * are not in agreement if they then want a NORETURN attribute or
+ * not. *sigh* */
+ DEBUGASSERT(p == NULL);
+}
+
/*
* multi_addmsg()
*
@@ -396,7 +407,10 @@ struct Curl_multi *Curl_multi_handle(size_t hashsize, /* socket hash */
sh_init(&multi->sockhash, hashsize);
- if(Curl_conncache_init(&multi->conn_cache, chashsize))
+ Curl_hash_init(&multi->proto_hash, 23,
+ Curl_hash_str, Curl_str_key_compare, ph_freeentry);
+
+ if(Curl_conncache_init(&multi->conn_cache, multi, chashsize))
goto error;
Curl_llist_init(&multi->msglist, NULL);
@@ -412,14 +426,7 @@ struct Curl_multi *Curl_multi_handle(size_t hashsize, /* socket hash */
goto error;
#else
#ifdef ENABLE_WAKEUP
- if(wakeup_create(multi->wakeup_pair) < 0) {
- multi->wakeup_pair[0] = CURL_SOCKET_BAD;
- multi->wakeup_pair[1] = CURL_SOCKET_BAD;
- }
- else if(curlx_nonblock(multi->wakeup_pair[0], TRUE) < 0 ||
- curlx_nonblock(multi->wakeup_pair[1], TRUE) < 0) {
- wakeup_close(multi->wakeup_pair[0]);
- wakeup_close(multi->wakeup_pair[1]);
+ if(wakeup_create(multi->wakeup_pair, TRUE) < 0) {
multi->wakeup_pair[0] = CURL_SOCKET_BAD;
multi->wakeup_pair[1] = CURL_SOCKET_BAD;
}
@@ -431,6 +438,7 @@ struct Curl_multi *Curl_multi_handle(size_t hashsize, /* socket hash */
error:
sockhash_destroy(&multi->sockhash);
+ Curl_hash_destroy(&multi->proto_hash);
Curl_hash_destroy(&multi->hostcache);
Curl_conncache_destroy(&multi->conn_cache);
free(multi);
@@ -544,10 +552,10 @@ CURLMcode curl_multi_add_handle(struct Curl_multi *multi,
Curl_llist_init(&data->state.timeoutlist, NULL);
/*
- * No failure allowed in this function beyond this point. And no
- * modification of easy nor multi handle allowed before this except for
- * potential multi's connection cache growing which won't be undone in this
- * function no matter what.
+ * No failure allowed in this function beyond this point. No modification of
+ * easy nor multi handle allowed before this except for potential multi's
+ * connection cache growing which will not be undone in this function no
+ * matter what.
*/
if(data->set.errorbuffer)
data->set.errorbuffer[0] = 0;
@@ -684,8 +692,8 @@ static CURLcode multi_done(struct Curl_easy *data,
case CURLE_ABORTED_BY_CALLBACK:
case CURLE_READ_ERROR:
case CURLE_WRITE_ERROR:
- /* When we're aborted due to a callback return code it basically have to
- be counted as premature as there is trouble ahead if we don't. We have
+ /* When we are aborted due to a callback return code it basically have to
+ be counted as premature as there is trouble ahead if we do not. We have
many callbacks and protocols work differently, we could potentially do
this more fine-grained in the future. */
premature = TRUE;
@@ -749,8 +757,8 @@ static CURLcode multi_done(struct Curl_easy *data,
restrictions in our or the server's end
if premature is TRUE, it means this connection was said to be DONE before
- the entire request operation is complete and thus we can't know in what
- state it is for reusing, so we're forced to close it. In a perfect world
+ the entire request operation is complete and thus we cannot know in what
+ state it is for reusing, so we are forced to close it. In a perfect world
we can add code that keep track of if we really must close it here or not,
but currently we have no such detail knowledge.
*/
@@ -863,7 +871,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
if(data->conn &&
data->mstate > MSTATE_DO &&
data->mstate < MSTATE_COMPLETED) {
- /* Set connection owner so that the DONE function closes it. We can
+ /* Set connection owner so that the DONE function closes it. We can
safely do this here since connection is killed. */
streamclose(data->conn, "Removed with partial response");
}
@@ -872,7 +880,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
/* multi_done() clears the association between the easy handle and the
connection.
- Note that this ignores the return code simply because there's
+ Note that this ignores the return code simply because there is
nothing really useful to do with it anyway! */
(void)multi_done(data, data->result, premature);
}
@@ -906,7 +914,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
what we want */
data->mstate = MSTATE_COMPLETED;
- /* This ignores the return code even in case of problems because there's
+ /* This ignores the return code even in case of problems because there is
nothing more to do about that, here */
(void)singlesocket(multi, easy); /* to let the application know what sockets
that vanish with this handle */
@@ -918,7 +926,7 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
/* This removes a handle that was part the multi interface that used
CONNECT_ONLY, that connection is now left alive but since this handle
has bits.close set nothing can use that transfer anymore and it is
- forbidden from reuse. And this easy handle cannot find the connection
+ forbidden from reuse. This easy handle cannot find the connection
anymore once removed from the multi handle
Better close the connection here, at once.
@@ -945,12 +953,12 @@ CURLMcode curl_multi_remove_handle(struct Curl_multi *multi,
#endif
/* as this was using a shared connection cache we clear the pointer to that
- since we're not part of that multi handle anymore */
+ since we are not part of that multi handle anymore */
data->state.conn_cache = NULL;
data->multi = NULL; /* clear the association to this multi handle */
- /* make sure there's no pending message in the queue sent from this easy
+ /* make sure there is no pending message in the queue sent from this easy
handle */
for(e = multi->msglist.head; e; e = e->next) {
struct Curl_message *msg = e->ptr;
@@ -1002,8 +1010,9 @@ void Curl_detach_connection(struct Curl_easy *data)
* This is the only function that should assign data->conn
*/
void Curl_attach_connection(struct Curl_easy *data,
- struct connectdata *conn)
+ struct connectdata *conn)
{
+ DEBUGASSERT(data);
DEBUGASSERT(!data->conn);
DEBUGASSERT(conn);
data->conn = conn;
@@ -1016,12 +1025,14 @@ void Curl_attach_connection(struct Curl_easy *data,
static int connecting_getsock(struct Curl_easy *data, curl_socket_t *socks)
{
struct connectdata *conn = data->conn;
- (void)socks;
- /* Not using `conn->sockfd` as `Curl_xfer_setup()` initializes
- * that *after* the connect. */
- if(conn && conn->sock[FIRSTSOCKET] != CURL_SOCKET_BAD) {
+ curl_socket_t sockfd;
+
+ if(!conn)
+ return GETSOCK_BLANK;
+ sockfd = Curl_conn_get_socket(data, FIRSTSOCKET);
+ if(sockfd != CURL_SOCKET_BAD) {
/* Default is to wait to something from the server */
- socks[0] = conn->sock[FIRSTSOCKET];
+ socks[0] = sockfd;
return GETSOCK_READSOCK(0);
}
return GETSOCK_BLANK;
@@ -1030,11 +1041,16 @@ static int connecting_getsock(struct Curl_easy *data, curl_socket_t *socks)
static int protocol_getsock(struct Curl_easy *data, curl_socket_t *socks)
{
struct connectdata *conn = data->conn;
- if(conn && conn->handler->proto_getsock)
+ curl_socket_t sockfd;
+
+ if(!conn)
+ return GETSOCK_BLANK;
+ if(conn->handler->proto_getsock)
return conn->handler->proto_getsock(data, conn, socks);
- else if(conn && conn->sockfd != CURL_SOCKET_BAD) {
+ sockfd = Curl_conn_get_socket(data, FIRSTSOCKET);
+ if(sockfd != CURL_SOCKET_BAD) {
/* Default is to wait to something from the server */
- socks[0] = conn->sockfd;
+ socks[0] = sockfd;
return GETSOCK_READSOCK(0);
}
return GETSOCK_BLANK;
@@ -1043,9 +1059,11 @@ static int protocol_getsock(struct Curl_easy *data, curl_socket_t *socks)
static int domore_getsock(struct Curl_easy *data, curl_socket_t *socks)
{
struct connectdata *conn = data->conn;
- if(conn && conn->handler->domore_getsock)
+ if(!conn)
+ return GETSOCK_BLANK;
+ if(conn->handler->domore_getsock)
return conn->handler->domore_getsock(data, conn, socks);
- else if(conn && conn->sockfd != CURL_SOCKET_BAD) {
+ else if(conn->sockfd != CURL_SOCKET_BAD) {
/* Default is that we want to send something to the server */
socks[0] = conn->sockfd;
return GETSOCK_WRITESOCK(0);
@@ -1056,9 +1074,11 @@ static int domore_getsock(struct Curl_easy *data, curl_socket_t *socks)
static int doing_getsock(struct Curl_easy *data, curl_socket_t *socks)
{
struct connectdata *conn = data->conn;
- if(conn && conn->handler->doing_getsock)
+ if(!conn)
+ return GETSOCK_BLANK;
+ if(conn->handler->doing_getsock)
return conn->handler->doing_getsock(data, conn, socks);
- else if(conn && conn->sockfd != CURL_SOCKET_BAD) {
+ else if(conn->sockfd != CURL_SOCKET_BAD) {
/* Default is that we want to send something to the server */
socks[0] = conn->sockfd;
return GETSOCK_WRITESOCK(0);
@@ -1069,7 +1089,6 @@ static int doing_getsock(struct Curl_easy *data, curl_socket_t *socks)
static int perform_getsock(struct Curl_easy *data, curl_socket_t *sock)
{
struct connectdata *conn = data->conn;
-
if(!conn)
return GETSOCK_BLANK;
else if(conn->handler->perform_getsock)
@@ -1106,6 +1125,7 @@ static int perform_getsock(struct Curl_easy *data, curl_socket_t *sock)
static void multi_getsock(struct Curl_easy *data,
struct easy_pollset *ps)
{
+ bool expect_sockets = TRUE;
/* The no connection case can happen when this is called from
curl_multi_remove_handle() => singlesocket() => multi_getsock().
*/
@@ -1119,11 +1139,14 @@ static void multi_getsock(struct Curl_easy *data,
case MSTATE_SETUP:
case MSTATE_CONNECT:
/* nothing to poll for yet */
+ expect_sockets = FALSE;
break;
case MSTATE_RESOLVING:
Curl_pollset_add_socks(data, ps, Curl_resolv_getsock);
- /* connection filters are not involved in this phase */
+ /* connection filters are not involved in this phase. It's ok if we get no
+ * sockets to wait for. Resolving can wake up from other sources. */
+ expect_sockets = FALSE;
break;
case MSTATE_CONNECTING:
@@ -1157,19 +1180,29 @@ static void multi_getsock(struct Curl_easy *data,
case MSTATE_RATELIMITING:
/* we need to let time pass, ignore socket(s) */
+ expect_sockets = FALSE;
break;
case MSTATE_DONE:
case MSTATE_COMPLETED:
case MSTATE_MSGSENT:
/* nothing more to poll for */
+ expect_sockets = FALSE;
break;
default:
failf(data, "multi_getsock: unexpected multi state %d", data->mstate);
DEBUGASSERT(0);
+ expect_sockets = FALSE;
break;
}
+
+ if(expect_sockets && !ps->num &&
+ !(data->req.keepon & (KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)) &&
+ Curl_conn_is_ip_connected(data, FIRSTSOCKET)) {
+ infof(data, "WARNING: no socket in pollset, transfer may stall!");
+ DEBUGASSERT(0);
+ }
}
CURLMcode curl_multi_fdset(struct Curl_multi *multi,
@@ -1197,7 +1230,7 @@ CURLMcode curl_multi_fdset(struct Curl_multi *multi,
for(i = 0; i < ps.num; i++) {
if(!FDSET_SOCK(ps.sockets[i]))
- /* pretend it doesn't exist */
+ /* pretend it does not exist */
continue;
if(ps.actions[i] & CURL_POLL_IN)
FD_SET(ps.sockets[i], read_fd_set);
@@ -1219,12 +1252,9 @@ CURLMcode curl_multi_waitfds(struct Curl_multi *multi,
unsigned int *fd_count)
{
struct Curl_easy *data;
- unsigned int nfds = 0;
+ struct curl_waitfds cwfds;
struct easy_pollset ps;
- unsigned int i;
CURLMcode result = CURLM_OK;
- struct curl_waitfd *ufd;
- unsigned int j;
if(!ufds)
return CURLM_BAD_FUNCTION_ARGUMENT;
@@ -1235,48 +1265,29 @@ CURLMcode curl_multi_waitfds(struct Curl_multi *multi,
if(multi->in_callback)
return CURLM_RECURSIVE_API_CALL;
+ Curl_waitfds_init(&cwfds, ufds, size);
memset(&ps, 0, sizeof(ps));
for(data = multi->easyp; data; data = data->next) {
multi_getsock(data, &ps);
-
- for(i = 0; i < ps.num; i++) {
- if(nfds < size) {
- curl_socket_t fd = ps.sockets[i];
- int fd_idx = -1;
-
- /* Simple linear search to skip an already added descriptor */
- for(j = 0; j < nfds; j++) {
- if(ufds[j].fd == fd) {
- fd_idx = (int)j;
- break;
- }
- }
-
- if(fd_idx < 0) {
- ufd = &ufds[nfds++];
- ufd->fd = ps.sockets[i];
- ufd->events = 0;
- }
- else
- ufd = &ufds[fd_idx];
-
- if(ps.actions[i] & CURL_POLL_IN)
- ufd->events |= CURL_WAIT_POLLIN;
- if(ps.actions[i] & CURL_POLL_OUT)
- ufd->events |= CURL_WAIT_POLLOUT;
- }
- else
- return CURLM_OUT_OF_MEMORY;
+ if(Curl_waitfds_add_ps(&cwfds, &ps)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
}
+ if(Curl_conncache_add_waitfds(&multi->conn_cache, &cwfds)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
+
+out:
if(fd_count)
- *fd_count = nfds;
+ *fd_count = cwfds.n;
return result;
}
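
curl_multi_waitfds() now fills the caller's array through the shared curl_waitfds helper and also exports sockets owned by the connection cache (connections being shut down). A caller-side sketch, assuming the documented public signature; note that in this version running out of slots is reported as CURLM_OUT_OF_MEMORY, so size the array generously:

#include <curl/curl.h>
#include <stdio.h>

/* Print which sockets libcurl wants monitored, for a multi handle that
   already has transfers added. */
static void dump_waitfds(CURLM *multi)
{
  struct curl_waitfd ufds[64];
  unsigned int n = 0;
  CURLMcode mc = curl_multi_waitfds(multi, ufds,
                                    sizeof(ufds)/sizeof(ufds[0]), &n);
  if(mc == CURLM_OK) {
    unsigned int i;
    for(i = 0; i < n; i++)
      printf("fd %ld wants%s%s\n", (long)ufds[i].fd,
             (ufds[i].events & CURL_WAIT_POLLIN) ? " read" : "",
             (ufds[i].events & CURL_WAIT_POLLOUT) ? " write" : "");
  }
}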
#ifdef USE_WINSOCK
-/* Reset FD_WRITE for TCP sockets. Nothing is actually sent. UDP sockets can't
+/* Reset FD_WRITE for TCP sockets. Nothing is actually sent. UDP sockets cannot
* be reset this way because an empty datagram would be sent. #9203
*
* "On Windows the internal state of FD_WRITE as returned from
@@ -1291,29 +1302,6 @@ static void reset_socket_fdwrite(curl_socket_t s)
}
#endif
-static CURLMcode ufds_increase(struct pollfd **pfds, unsigned int *pfds_len,
- unsigned int inc, bool *is_malloced)
-{
- struct pollfd *new_fds, *old_fds = *pfds;
- unsigned int new_len = *pfds_len + inc;
-
- new_fds = calloc(new_len, sizeof(struct pollfd));
- if(!new_fds) {
- if(*is_malloced)
- free(old_fds);
- *pfds = NULL;
- *pfds_len = 0;
- return CURLM_OUT_OF_MEMORY;
- }
- memcpy(new_fds, old_fds, (*pfds_len) * sizeof(struct pollfd));
- if(*is_malloced)
- free(old_fds);
- *pfds = new_fds;
- *pfds_len = new_len;
- *is_malloced = TRUE;
- return CURLM_OK;
-}
-
#define NUM_POLLS_ON_STACK 10
static CURLMcode multi_wait(struct Curl_multi *multi,
@@ -1330,10 +1318,9 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
long timeout_internal;
int retcode = 0;
struct pollfd a_few_on_stack[NUM_POLLS_ON_STACK];
- struct pollfd *ufds = &a_few_on_stack[0];
- unsigned int ufds_len = NUM_POLLS_ON_STACK;
- unsigned int nfds = 0, curl_nfds = 0; /* how many ufds are in use */
- bool ufds_malloc = FALSE;
+ struct curl_pollfds cpfds;
+ unsigned int curl_nfds = 0; /* how many pfds are for curl transfers */
+ CURLMcode result = CURLM_OK;
#ifdef USE_WINSOCK
WSANETWORKEVENTS wsa_events;
DEBUGASSERT(multi->wsa_event != WSA_INVALID_EVENT);
@@ -1351,139 +1338,105 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
if(timeout_ms < 0)
return CURLM_BAD_FUNCTION_ARGUMENT;
- /* If the internally desired timeout is actually shorter than requested from
- the outside, then use the shorter time! But only if the internal timer
- is actually larger than -1! */
- (void)multi_timeout(multi, &timeout_internal);
- if((timeout_internal >= 0) && (timeout_internal < (long)timeout_ms))
- timeout_ms = (int)timeout_internal;
-
- memset(ufds, 0, ufds_len * sizeof(struct pollfd));
+ Curl_pollfds_init(&cpfds, a_few_on_stack, NUM_POLLS_ON_STACK);
memset(&ps, 0, sizeof(ps));
/* Add the curl handles to our pollfds first */
for(data = multi->easyp; data; data = data->next) {
multi_getsock(data, &ps);
-
- for(i = 0; i < ps.num; i++) {
- short events = 0;
-#ifdef USE_WINSOCK
- long mask = 0;
-#endif
- if(ps.actions[i] & CURL_POLL_IN) {
-#ifdef USE_WINSOCK
- mask |= FD_READ|FD_ACCEPT|FD_CLOSE;
-#endif
- events |= POLLIN;
- }
- if(ps.actions[i] & CURL_POLL_OUT) {
-#ifdef USE_WINSOCK
- mask |= FD_WRITE|FD_CONNECT|FD_CLOSE;
- reset_socket_fdwrite(ps.sockets[i]);
-#endif
- events |= POLLOUT;
- }
- if(events) {
- if(nfds && ps.sockets[i] == ufds[nfds-1].fd) {
- ufds[nfds-1].events |= events;
- }
- else {
- if(nfds >= ufds_len) {
- if(ufds_increase(&ufds, &ufds_len, 100, &ufds_malloc))
- return CURLM_OUT_OF_MEMORY;
- }
- DEBUGASSERT(nfds < ufds_len);
- ufds[nfds].fd = ps.sockets[i];
- ufds[nfds].events = events;
- ++nfds;
- }
- }
-#ifdef USE_WINSOCK
- if(mask) {
- if(WSAEventSelect(ps.sockets[i], multi->wsa_event, mask) != 0) {
- if(ufds_malloc)
- free(ufds);
- return CURLM_INTERNAL_ERROR;
- }
- }
-#endif
+ if(Curl_pollfds_add_ps(&cpfds, &ps)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
}
- curl_nfds = nfds; /* what curl internally used in ufds */
+ if(Curl_conncache_add_pollfds(&multi->conn_cache, &cpfds)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
+ curl_nfds = cpfds.n; /* what curl internally uses in cpfds */
/* Add external file descriptions from poll-like struct curl_waitfd */
for(i = 0; i < extra_nfds; i++) {
+ unsigned short events = 0;
+ if(extra_fds[i].events & CURL_WAIT_POLLIN)
+ events |= POLLIN;
+ if(extra_fds[i].events & CURL_WAIT_POLLPRI)
+ events |= POLLPRI;
+ if(extra_fds[i].events & CURL_WAIT_POLLOUT)
+ events |= POLLOUT;
+ if(Curl_pollfds_add_sock(&cpfds, extra_fds[i].fd, events)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
+ }
+
#ifdef USE_WINSOCK
+ /* Set the WSA events based on the collected pollds */
+ for(i = 0; i < cpfds.n; i++) {
long mask = 0;
- if(extra_fds[i].events & CURL_WAIT_POLLIN)
+ if(cpfds.pfds[i].events & POLLIN)
mask |= FD_READ|FD_ACCEPT|FD_CLOSE;
- if(extra_fds[i].events & CURL_WAIT_POLLPRI)
+ if(cpfds.pfds[i].events & POLLPRI)
mask |= FD_OOB;
- if(extra_fds[i].events & CURL_WAIT_POLLOUT) {
+ if(cpfds.pfds[i].events & POLLOUT) {
mask |= FD_WRITE|FD_CONNECT|FD_CLOSE;
- reset_socket_fdwrite(extra_fds[i].fd);
+ reset_socket_fdwrite(cpfds.pfds[i].fd);
}
- if(WSAEventSelect(extra_fds[i].fd, multi->wsa_event, mask) != 0) {
- if(ufds_malloc)
- free(ufds);
- return CURLM_INTERNAL_ERROR;
- }
-#endif
- if(nfds >= ufds_len) {
- if(ufds_increase(&ufds, &ufds_len, 100, &ufds_malloc))
- return CURLM_OUT_OF_MEMORY;
+ if(mask) {
+ if(WSAEventSelect(cpfds.pfds[i].fd, multi->wsa_event, mask) != 0) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
+ }
}
- DEBUGASSERT(nfds < ufds_len);
- ufds[nfds].fd = extra_fds[i].fd;
- ufds[nfds].events = 0;
- if(extra_fds[i].events & CURL_WAIT_POLLIN)
- ufds[nfds].events |= POLLIN;
- if(extra_fds[i].events & CURL_WAIT_POLLPRI)
- ufds[nfds].events |= POLLPRI;
- if(extra_fds[i].events & CURL_WAIT_POLLOUT)
- ufds[nfds].events |= POLLOUT;
- ++nfds;
}
+#endif
#ifdef ENABLE_WAKEUP
#ifndef USE_WINSOCK
if(use_wakeup && multi->wakeup_pair[0] != CURL_SOCKET_BAD) {
- if(nfds >= ufds_len) {
- if(ufds_increase(&ufds, &ufds_len, 100, &ufds_malloc))
- return CURLM_OUT_OF_MEMORY;
+ if(Curl_pollfds_add_sock(&cpfds, multi->wakeup_pair[0], POLLIN)) {
+ result = CURLM_OUT_OF_MEMORY;
+ goto out;
}
- DEBUGASSERT(nfds < ufds_len);
- ufds[nfds].fd = multi->wakeup_pair[0];
- ufds[nfds].events = POLLIN;
- ++nfds;
}
#endif
#endif
+ /* We check the internal timeout *AFTER* we collected all sockets to
+ * poll. Collecting the sockets may install new timers by protocols
+ * and connection filters.
+ * Use the shorter one of the internal and the caller requested timeout. */
+ (void)multi_timeout(multi, &timeout_internal);
+ if((timeout_internal >= 0) && (timeout_internal < (long)timeout_ms))
+ timeout_ms = (int)timeout_internal;
+
#if defined(ENABLE_WAKEUP) && defined(USE_WINSOCK)
- if(nfds || use_wakeup) {
+ if(cpfds.n || use_wakeup) {
#else
- if(nfds) {
+ if(cpfds.n) {
#endif
int pollrc;
#ifdef USE_WINSOCK
- if(nfds)
- pollrc = Curl_poll(ufds, nfds, 0); /* just pre-check with WinSock */
+ if(cpfds.n) /* just pre-check with WinSock */
+ pollrc = Curl_poll(cpfds.pfds, cpfds.n, 0);
else
pollrc = 0;
#else
- pollrc = Curl_poll(ufds, nfds, timeout_ms); /* wait... */
+ pollrc = Curl_poll(cpfds.pfds, cpfds.n, timeout_ms); /* wait... */
#endif
- if(pollrc < 0)
- return CURLM_UNRECOVERABLE_POLL;
+ if(pollrc < 0) {
+ result = CURLM_UNRECOVERABLE_POLL;
+ goto out;
+ }
if(pollrc > 0) {
retcode = pollrc;
#ifdef USE_WINSOCK
}
else { /* now wait... if not ready during the pre-check (pollrc == 0) */
- WSAWaitForMultipleEvents(1, &multi->wsa_event, FALSE, timeout_ms, FALSE);
+ WSAWaitForMultipleEvents(1, &multi->wsa_event, FALSE, (DWORD)timeout_ms,
+ FALSE);
}
/* With WinSock, we have to run the following section unconditionally
to call WSAEventSelect(fd, event, 0) on all the sockets */
@@ -1493,7 +1446,7 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
struct, the bit values of the actual underlying poll() implementation
may not be the same as the ones in the public libcurl API! */
for(i = 0; i < extra_nfds; i++) {
- unsigned r = ufds[curl_nfds + i].revents;
+ unsigned r = (unsigned)cpfds.pfds[curl_nfds + i].revents;
unsigned short mask = 0;
#ifdef USE_WINSOCK
curl_socket_t s = extra_fds[i].fd;
@@ -1510,7 +1463,7 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
}
WSAEventSelect(s, multi->wsa_event, 0);
if(!pollrc) {
- extra_fds[i].revents = mask;
+ extra_fds[i].revents = (short)mask;
continue;
}
#endif
@@ -1520,7 +1473,7 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
mask |= CURL_WAIT_POLLOUT;
if(r & POLLPRI)
mask |= CURL_WAIT_POLLPRI;
- extra_fds[i].revents = mask;
+ extra_fds[i].revents = (short)mask;
}
#ifdef USE_WINSOCK
@@ -1547,7 +1500,7 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
#else
#ifdef ENABLE_WAKEUP
if(use_wakeup && multi->wakeup_pair[0] != CURL_SOCKET_BAD) {
- if(ufds[curl_nfds + extra_nfds].revents & POLLIN) {
+ if(cpfds.pfds[curl_nfds + extra_nfds].revents & POLLIN) {
char buf[64];
ssize_t nread;
while(1) {
@@ -1571,18 +1524,16 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
}
}
- if(ufds_malloc)
- free(ufds);
if(ret)
*ret = retcode;
#if defined(ENABLE_WAKEUP) && defined(USE_WINSOCK)
- if(extrawait && !nfds && !use_wakeup) {
+ if(extrawait && !cpfds.n && !use_wakeup) {
#else
- if(extrawait && !nfds) {
+ if(extrawait && !cpfds.n) {
#endif
long sleep_ms = 0;
- /* Avoid busy-looping when there's nothing particular to wait for */
+ /* Avoid busy-looping when there is nothing particular to wait for */
if(!curl_multi_timeout(multi, &sleep_ms) && sleep_ms) {
if(sleep_ms > timeout_ms)
sleep_ms = timeout_ms;
@@ -1594,7 +1545,9 @@ static CURLMcode multi_wait(struct Curl_multi *multi,
}
}
- return CURLM_OK;
+out:
+ Curl_pollfds_cleanup(&cpfds);
+ return result;
}
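
multi_wait() now funnels libcurl's own sockets and the caller's extra_fds through the same struct curl_pollfds before the single Curl_poll() call; the caller-visible contract of curl_multi_wait()/curl_multi_poll() is unchanged. A usage sketch with one application-owned descriptor, using only documented public API (app_fd stands for whatever descriptor the application wants included in the wait):

#include <curl/curl.h>

/* Wait up to one second for libcurl's sockets or our own descriptor, then
   let libcurl act on whatever became ready. */
static void wait_once(CURLM *multi, curl_socket_t app_fd)
{
  struct curl_waitfd extra;
  int numfds = 0;
  int running = 0;

  extra.fd = app_fd;
  extra.events = CURL_WAIT_POLLIN;
  extra.revents = 0;

  if(curl_multi_poll(multi, &extra, 1, 1000, &numfds) == CURLM_OK) {
    if(extra.revents & CURL_WAIT_POLLIN) {
      /* drain/handle the application's own descriptor here */
    }
    curl_multi_perform(multi, &running);
  }
}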
CURLMcode curl_multi_wait(struct Curl_multi *multi,
@@ -1623,6 +1576,15 @@ CURLMcode curl_multi_wakeup(struct Curl_multi *multi)
it has to be careful only to access parts of the
Curl_multi struct that are constant */
+#if defined(ENABLE_WAKEUP) && !defined(USE_WINSOCK)
+#ifdef USE_EVENTFD
+ const void *buf;
+ const uint64_t val = 1;
+#else
+ char buf[1];
+#endif
+#endif
+
/* GOOD_MULTI_HANDLE can be safely called */
if(!GOOD_MULTI_HANDLE(multi))
return CURLM_BAD_HANDLE;
@@ -1636,8 +1598,11 @@ CURLMcode curl_multi_wakeup(struct Curl_multi *multi)
making it safe to access from another thread after the init part
and before cleanup */
if(multi->wakeup_pair[1] != CURL_SOCKET_BAD) {
- char buf[1];
+#ifdef USE_EVENTFD
+ buf = &val;
+#else
buf[0] = 1;
+#endif
while(1) {
/* swrite() is not thread-safe in general, because concurrent calls
can have their messages interleaved, but in this case the content
@@ -1646,7 +1611,7 @@ CURLMcode curl_multi_wakeup(struct Curl_multi *multi)
The write socket is set to non-blocking, this way this function
cannot block, making it safe to call even from the same thread
that will call curl_multi_wait(). If swrite() returns that it
- would block, it's considered successful because it means that
+ would block, it is considered successful because it means that
previous calls to this function will wake up the poll(). */
if(wakeup_write(multi->wakeup_pair[1], buf, sizeof(buf)) < 0) {
int err = SOCKERRNO;
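
With USE_EVENTFD both entries of wakeup_pair refer to the same eventfd descriptor (which is why curl_multi_cleanup() below closes only index 0 in that case), and waking it up means writing a full 8-byte counter value rather than the single byte used with pipe()/socketpair(). A minimal Linux-only sketch of that kernel interface, independent of curl:

#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
  uint64_t val = 1, got = 0;
  int efd = eventfd(0, EFD_NONBLOCK);  /* one fd serves both ends */
  if(efd == -1)
    return 1;

  /* "wakeup": the write must be exactly sizeof(uint64_t) bytes */
  if(write(efd, &val, sizeof(val)) != (ssize_t)sizeof(val))
    perror("write");

  /* the waiting side reads the counter back, draining it in one go */
  if(read(efd, &got, sizeof(got)) == (ssize_t)sizeof(got))
    printf("counter was %llu\n", (unsigned long long)got);

  close(efd);
  return 0;
}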
@@ -1710,7 +1675,7 @@ CURLMcode Curl_multi_add_perform(struct Curl_multi *multi,
if(!rc) {
struct SingleRequest *k = &data->req;
- /* pass in NULL for 'conn' here since we don't want to init the
+ /* pass in NULL for 'conn' here since we do not want to init the
connection, only this transfer */
Curl_init_do(data, NULL);
@@ -1742,7 +1707,7 @@ static CURLcode multi_do(struct Curl_easy *data, bool *done)
* second connection.
*
* 'complete' can return 0 for incomplete, 1 for done and -1 for go back to
- * DOING state there's more work to do!
+ * DOING state there is more work to do!
*/
static CURLcode multi_do_more(struct Curl_easy *data, int *complete)
@@ -1870,10 +1835,10 @@ static CURLcode protocol_connect(struct Curl_easy *data,
&& conn->bits.protoconnstart) {
/* We already are connected, get back. This may happen when the connect
worked fine in the first call, like when we connect to a local server
- or proxy. Note that we don't know if the protocol is actually done.
+ or proxy. Note that we do not know if the protocol is actually done.
- Unless this protocol doesn't have any protocol-connect callback, as
- then we know we're done. */
+ Unless this protocol does not have any protocol-connect callback, as
+ then we know we are done. */
if(!conn->handler->connecting)
*protocol_done = TRUE;
@@ -1890,7 +1855,7 @@ static CURLcode protocol_connect(struct Curl_easy *data,
else
*protocol_done = TRUE;
- /* it has started, possibly even completed but that knowledge isn't stored
+ /* it has started, possibly even completed but that knowledge is not stored
in this bit! */
if(!result)
conn->bits.protoconnstart = TRUE;
@@ -1904,6 +1869,20 @@ static void set_in_callback(struct Curl_multi *multi, bool value)
multi->in_callback = value;
}
+/*
+ * posttransfer() is called immediately after a transfer ends
+ */
+static void multi_posttransfer(struct Curl_easy *data)
+{
+#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
+ /* restore the signal handler for SIGPIPE before we get back */
+ if(!data->set.no_signal)
+ signal(SIGPIPE, data->state.prev_signal);
+#else
+ (void)data; /* unused parameter */
+#endif
+}
+
static CURLMcode multi_runsingle(struct Curl_multi *multi,
struct curltime *nowp,
struct Curl_easy *data)
@@ -1926,7 +1905,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* a multi-level callback returned error before, meaning every individual
transfer now has failed */
result = CURLE_ABORTED_BY_CALLBACK;
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, FALSE);
multistate(data, MSTATE_COMPLETED);
}
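
multi_posttransfer() is a local replacement for the Curl_posttransfer() calls that the rest of this file drops below: on platforms without MSG_NOSIGNAL it restores whatever SIGPIPE disposition was saved before the transfer started ignoring the signal. The underlying idiom, as a standalone sketch:

#include <signal.h>
#include <stdio.h>

int main(void)
{
#ifdef SIGPIPE
  /* ignore SIGPIPE while writing to sockets the peer may close, remembering
     the previous handler so it can be put back afterwards */
  void (*prev)(int) = signal(SIGPIPE, SIG_IGN);

  /* ... perform the network writes here ... */

  signal(SIGPIPE, prev);   /* restore, as multi_posttransfer() does */
#endif
  puts("done");
  return 0;
}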
@@ -2009,7 +1988,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
if(!result) {
*nowp = Curl_pgrsTime(data, TIMER_POSTQUEUE);
if(async)
- /* We're now waiting for an asynchronous name lookup */
+ /* We are now waiting for an asynchronous name lookup */
multistate(data, MSTATE_RESOLVING);
else {
/* after the connect has been sent off, go WAITCONNECT unless the
@@ -2062,7 +2041,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* Update sockets here, because the socket(s) may have been
closed and the application thus needs to be told, even if it
is likely that the same socket(s) will again be used further
- down. If the name has not yet been resolved, it is likely
+ down. If the name has not yet been resolved, it is likely
that new sockets have been opened in an attempt to contact
another resolver. */
rc = singlesocket(multi, data);
@@ -2132,7 +2111,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else if(result) {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, TRUE);
stream_error = TRUE;
break;
@@ -2162,7 +2141,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, TRUE);
stream_error = TRUE;
}
@@ -2178,7 +2157,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else if(result) {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, TRUE);
stream_error = TRUE;
}
@@ -2198,9 +2177,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
Curl_set_in_callback(data, false);
if(prereq_rc != CURL_PREREQFUNC_OK) {
failf(data, "operation aborted by pre-request callback");
- /* failure in pre-request callback - don't do any other processing */
+ /* failure in pre-request callback - do not do any other
+ processing */
result = CURLE_ABORTED_BY_CALLBACK;
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, FALSE);
stream_error = TRUE;
break;
@@ -2230,7 +2210,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* skip some states if it is important */
multi_done(data, CURLE_OK, FALSE);
- /* if there's no connection left, skip the DONE state */
+ /* if there is no connection left, skip the DONE state */
multistate(data, data->conn ?
MSTATE_DONE : MSTATE_COMPLETED);
rc = CURLM_CALL_MULTI_PERFORM;
@@ -2246,13 +2226,13 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* after DO, go DO_DONE... or DO_MORE */
else if(data->conn->bits.do_more) {
- /* we're supposed to do more, but we need to sit down, relax
+ /* we are supposed to do more, but we need to sit down, relax
and wait a little while first */
multistate(data, MSTATE_DOING_MORE);
rc = CURLM_CALL_MULTI_PERFORM;
}
else {
- /* we're done with the DO, now DID */
+ /* we are done with the DO, now DID */
multistate(data, MSTATE_DID);
rc = CURLM_CALL_MULTI_PERFORM;
}
@@ -2261,7 +2241,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
data->conn->bits.reuse) {
/*
* In this situation, a connection that we were trying to use
- * may have unexpectedly died. If possible, send the connection
+ * may have unexpectedly died. If possible, send the connection
* back to the CONNECT phase so we can try again.
*/
char *newurl = NULL;
@@ -2275,7 +2255,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
stream_error = TRUE;
}
- Curl_posttransfer(data);
+ multi_posttransfer(data);
drc = multi_done(data, result, FALSE);
/* When set to retry the connection, we must go back to the CONNECT
@@ -2295,19 +2275,19 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
else {
- /* done didn't return OK or SEND_ERROR */
+ /* done did not return OK or SEND_ERROR */
result = drc;
}
}
else {
- /* Have error handler disconnect conn if we can't retry */
+ /* Have error handler disconnect conn if we cannot retry */
stream_error = TRUE;
}
free(newurl);
}
else {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
if(data->conn)
multi_done(data, result, FALSE);
stream_error = TRUE;
@@ -2329,7 +2309,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, FALSE);
stream_error = TRUE;
}
@@ -2355,7 +2335,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
else {
/* failure detected */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, FALSE);
stream_error = TRUE;
}
@@ -2367,7 +2347,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
/* Check if we can move pending requests to send pipe */
process_pending_handles(multi); /* multiplexed */
- /* Only perform the transfer if there's a good socket to work with.
+ /* Only perform the transfer if there is a good socket to work with.
Having both BAD is a signal to skip immediately to DONE */
if((data->conn->sockfd != CURL_SOCKET_BAD) ||
(data->conn->writesockfd != CURL_SOCKET_BAD))
@@ -2397,7 +2377,7 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
result != CURLE_HTTP2_STREAM)
streamclose(data->conn, "Transfer returned error");
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, TRUE);
}
else {
@@ -2509,8 +2489,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
if(result) {
/*
* The transfer phase returned error, we mark the connection to get
- * closed to prevent being reused. This is because we can't possibly
- * know if the connection is in a good shape or not now. Unless it is
+ * closed to prevent being reused. This is because we cannot possibly
+ * know if the connection is in a good shape or not now. Unless it is
* a protocol which uses two "channels" like FTP, as then the error
* happened in the data connection.
*/
@@ -2519,13 +2499,13 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
result != CURLE_HTTP2_STREAM)
streamclose(data->conn, "Transfer returned error");
- Curl_posttransfer(data);
+ multi_posttransfer(data);
multi_done(data, result, TRUE);
}
else if(data->req.done && !Curl_cwriter_is_paused(data)) {
/* call this even if the readwrite function returned error */
- Curl_posttransfer(data);
+ multi_posttransfer(data);
/* When we follow redirects or is set to retry the connection, we must
to go back to the CONNECT state */
@@ -2552,8 +2532,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
else {
/* after the transfer is done, go DONE */
- /* but first check to see if we got a location info even though we're
- not following redirects */
+ /* but first check to see if we got a location info even though we
+ are not following redirects */
if(data->req.location) {
free(newurl);
newurl = data->req.location;
@@ -2571,10 +2551,10 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
}
- else if(data->state.select_bits) {
+ else if(data->state.select_bits && !Curl_xfer_is_blocked(data)) {
/* This avoids CURLM_CALL_MULTI_PERFORM so that a very fast transfer
- won't get stuck on this transfer at the expense of other concurrent
- transfers */
+ will not get stuck on this transfer at the expense of other
+ concurrent transfers */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
free(newurl);
@@ -2610,8 +2590,8 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
}
}
#endif
- /* after we have DONE what we're supposed to do, go COMPLETED, and
- it doesn't matter what the multi_done() returned! */
+ /* after we have DONE what we are supposed to do, go COMPLETED, and
+ it does not matter what the multi_done() returned! */
multistate(data, MSTATE_COMPLETED);
break;
@@ -2646,7 +2626,7 @@ statemachine_end:
if(data->mstate < MSTATE_COMPLETED) {
if(result) {
/*
- * If an error was returned, and we aren't in completed state now,
+ * If an error was returned, and we are not in completed state now,
* then we go to completed and consider this transfer aborted.
*/
@@ -2658,12 +2638,12 @@ statemachine_end:
if(data->conn) {
if(stream_error) {
- /* Don't attempt to send data over a connection that timed out */
+ /* Do not attempt to send data over a connection that timed out */
bool dead_connection = result == CURLE_OPERATION_TIMEDOUT;
struct connectdata *conn = data->conn;
/* This is where we make sure that the conn pointer is reset.
- We don't have to do this in every case block above where a
+ We do not have to do this in every case block above where a
failure is detected */
Curl_detach_connection(data);
@@ -2676,13 +2656,14 @@ statemachine_end:
}
else if(data->mstate == MSTATE_CONNECT) {
/* Curl_connect() failed */
- (void)Curl_posttransfer(data);
+ multi_posttransfer(data);
+ Curl_pgrsUpdate_nometer(data);
}
multistate(data, MSTATE_COMPLETED);
rc = CURLM_CALL_MULTI_PERFORM;
}
- /* if there's still a connection to use, call the progress function */
+ /* if there is still a connection to use, call the progress function */
else if(data->conn && Curl_pgrsUpdate(data)) {
/* aborted due to progress callback return code must close the
connection */
@@ -2752,6 +2733,7 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles)
/* the current node might be unlinked in multi_runsingle(), get the next
pointer now */
struct Curl_easy *datanext = data->next;
+
if(data->set.no_signal != nosig) {
sigpipe_restore(&pipe_st);
sigpipe_ignore(data, &pipe_st);
@@ -2760,11 +2742,14 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles)
result = multi_runsingle(multi, &now, data);
if(result)
returncode = result;
+
data = datanext; /* operate on next handle */
} while(data);
sigpipe_restore(&pipe_st);
}
+ Curl_conncache_multi_perform(multi);
+
/*
* Simply remove all expired timers from the splay since handles are dealt
* with unconditionally by this function and curl_multi_timeout() requires
@@ -2793,7 +2778,7 @@ CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles)
}
} while(t);
- *running_handles = multi->num_alive;
+ *running_handles = (int)multi->num_alive;
if(CURLM_OK >= returncode)
returncode = Curl_update_timer(multi);
@@ -2829,6 +2814,9 @@ CURLMcode curl_multi_cleanup(struct Curl_multi *multi)
/* First remove all remaining easy handles */
data = multi->easyp;
while(data) {
+ if(!GOOD_EASY_HANDLE(data))
+ return CURLM_BAD_HANDLE;
+
nextdata = data->next;
if(!data->state.done && data->conn)
/* if DONE was never called for this handle */
@@ -2853,9 +2841,10 @@ CURLMcode curl_multi_cleanup(struct Curl_multi *multi)
}
/* Close all the connections in the connection cache */
- Curl_conncache_close_all_connections(&multi->conn_cache);
+ Curl_conncache_multi_close_all(multi);
sockhash_destroy(&multi->sockhash);
+ Curl_hash_destroy(&multi->proto_hash);
Curl_conncache_destroy(&multi->conn_cache);
Curl_hash_destroy(&multi->hostcache);
Curl_psl_destroy(&multi->psl);
@@ -2865,12 +2854,10 @@ CURLMcode curl_multi_cleanup(struct Curl_multi *multi)
#else
#ifdef ENABLE_WAKEUP
wakeup_close(multi->wakeup_pair[0]);
+#ifndef USE_EVENTFD
wakeup_close(multi->wakeup_pair[1]);
#endif
#endif
-
-#ifdef USE_SSL
- Curl_free_multi_ssl_backend_data(multi->ssl_backend_data);
#endif
multi_xfer_bufs_free(multi);
@@ -2927,41 +2914,54 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
struct Curl_easy *data)
{
struct easy_pollset cur_poll;
- unsigned int i;
- struct Curl_sh_entry *entry;
- curl_socket_t s;
- int rc;
+ CURLMcode mresult;
/* Fill in the 'current' struct with the state as it is now: what sockets to
supervise and for what actions */
multi_getsock(data, &cur_poll);
+ mresult = Curl_multi_pollset_ev(multi, data, &cur_poll, &data->last_poll);
+
+ if(!mresult) /* Remember for next time */
+ memcpy(&data->last_poll, &cur_poll, sizeof(cur_poll));
+ return mresult;
+}
+
+CURLMcode Curl_multi_pollset_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct easy_pollset *ps,
+ struct easy_pollset *last_ps)
+{
+ unsigned int i;
+ struct Curl_sh_entry *entry;
+ curl_socket_t s;
+ int rc;
/* We have 0 .. N sockets already and we get to know about the 0 .. M
sockets we should have from now on. Detect the differences, remove no
longer supervised ones and add new ones */
/* walk over the sockets we got right now */
- for(i = 0; i < cur_poll.num; i++) {
- unsigned char cur_action = cur_poll.actions[i];
+ for(i = 0; i < ps->num; i++) {
+ unsigned char cur_action = ps->actions[i];
unsigned char last_action = 0;
int comboaction;
- s = cur_poll.sockets[i];
+ s = ps->sockets[i];
/* get it from the hash */
entry = sh_getentry(&multi->sockhash, s);
if(entry) {
/* check if new for this transfer */
unsigned int j;
- for(j = 0; j< data->last_poll.num; j++) {
- if(s == data->last_poll.sockets[j]) {
- last_action = data->last_poll.actions[j];
+ for(j = 0; j< last_ps->num; j++) {
+ if(s == last_ps->sockets[j]) {
+ last_action = last_ps->actions[j];
break;
}
}
}
else {
- /* this is a socket we didn't have before, add it to the hash! */
+ /* this is a socket we did not have before, add it to the hash! */
entry = sh_addentry(&multi->sockhash, s);
if(!entry)
/* fatal */
@@ -2978,14 +2978,15 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
if(cur_action & CURL_POLL_OUT)
entry->writers++;
}
- else if(!last_action) {
+ else if(!last_action &&
+ !Curl_hash_pick(&entry->transfers, (char *)&data, /* hash key */
+ sizeof(struct Curl_easy *))) {
/* a new transfer using this socket */
entry->users++;
if(cur_action & CURL_POLL_IN)
entry->readers++;
if(cur_action & CURL_POLL_OUT)
entry->writers++;
-
/* add 'data' to the transfer hash on this socket! */
if(!Curl_hash_add(&entry->transfers, (char *)&data, /* hash key */
sizeof(struct Curl_easy *), data)) {
@@ -3014,18 +3015,19 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
}
}
- entry->action = comboaction; /* store the current action state */
+ /* store the current action state */
+ entry->action = (unsigned int)comboaction;
}
- /* Check for last_poll.sockets that no longer appear in cur_poll.sockets.
+ /* Check for last_poll.sockets that no longer appear in ps->sockets.
* Need to remove the easy handle from the multi->sockhash->transfers and
* remove multi->sockhash entry when this was the last transfer */
- for(i = 0; i< data->last_poll.num; i++) {
+ for(i = 0; i < last_ps->num; i++) {
unsigned int j;
bool stillused = FALSE;
- s = data->last_poll.sockets[i];
- for(j = 0; j < cur_poll.num; j++) {
- if(s == cur_poll.sockets[j]) {
+ s = last_ps->sockets[i];
+ for(j = 0; j < ps->num; j++) {
+ if(s == ps->sockets[j]) {
/* this is still supervised */
stillused = TRUE;
break;
@@ -3038,7 +3040,7 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
/* if this is NULL here, the socket has been closed and notified so
already by Curl_multi_closed() */
if(entry) {
- unsigned char oldactions = data->last_poll.actions[i];
+ unsigned char oldactions = last_ps->actions[i];
/* this socket has been removed. Decrease user count */
entry->users--;
if(oldactions & CURL_POLL_OUT)
@@ -3068,8 +3070,6 @@ static CURLMcode singlesocket(struct Curl_multi *multi,
}
} /* for loop over num */
- /* Remember for next time */
- memcpy(&data->last_poll, &cur_poll, sizeof(data->last_poll));
return CURLM_OK;
}
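
Curl_multi_pollset_ev() factors the body of singlesocket() out so that callers outside singlesocket() (the connection cache's shutdown handling is the likely user) can report pollset changes into the multi_socket machinery too: it compares a new pollset against the last reported one, registers sockets that appeared and unregisters sockets that vanished. The comparison itself is a plain set difference over two small arrays; a standalone sketch of that core step, with illustrative names:

#include <stdio.h>

/* Report which fds were added and which were removed between two pollsets,
   the same before/after walk the function above performs over ps/last_ps. */
static void diff_pollsets(const int *last, int nlast, const int *cur, int ncur)
{
  int i, j;
  for(i = 0; i < ncur; i++) {          /* new sockets: in cur, not in last */
    for(j = 0; j < nlast; j++)
      if(cur[i] == last[j])
        break;
    if(j == nlast)
      printf("add fd %d\n", cur[i]);
  }
  for(i = 0; i < nlast; i++) {         /* gone sockets: in last, not in cur */
    for(j = 0; j < ncur; j++)
      if(last[i] == cur[j])
        break;
    if(j == ncur)
      printf("remove fd %d\n", last[i]);
  }
}

int main(void)
{
  int last[] = {4, 7}, cur[] = {7, 9};
  diff_pollsets(last, 2, cur, 2);      /* prints: add fd 9 / remove fd 4 */
  return 0;
}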
@@ -3085,7 +3085,7 @@ CURLcode Curl_updatesocket(struct Curl_easy *data)
* Curl_multi_closed()
*
* Used by the connect code to tell the multi_socket code that one of the
- * sockets we were using is about to be closed. This function will then
+ * sockets we were using is about to be closed. This function will then
* remove it from the sockethash for this handle to make the multi_socket API
* behave properly, especially for the case when libcurl will create another
* socket again and it gets the same file descriptor number.
@@ -3094,7 +3094,7 @@ CURLcode Curl_updatesocket(struct Curl_easy *data)
void Curl_multi_closed(struct Curl_easy *data, curl_socket_t s)
{
if(data) {
- /* if there's still an easy handle associated with this connection */
+ /* if there is still an easy handle associated with this connection */
struct Curl_multi *multi = data->multi;
if(multi) {
/* this is set if this connection is part of a handle that is added to
@@ -3170,7 +3170,7 @@ static CURLMcode add_next_timeout(struct curltime now,
/* copy the first entry to 'tv' */
memcpy(tv, &node->time, sizeof(*tv));
- /* Insert this node again into the splay. Keep the timer in the list in
+ /* Insert this node again into the splay. Keep the timer in the list in
case we need to recompute future timers. */
multi->timetree = Curl_splayinsert(*tv, multi->timetree,
&d->state.timenode);
@@ -3212,13 +3212,16 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
if(s != CURL_SOCKET_TIMEOUT) {
struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s);
- if(!entry)
- /* Unmatched socket, we can't act on it but we ignore this fact. In
+ if(!entry) {
+ /* Unmatched socket, we cannot act on it but we ignore this fact. In
real-world tests it has been proved that libevent can in fact give
the application actions even though the socket was just previously
asked to get removed, so thus we better survive stray socket actions
and just move on. */
- ;
+ /* The socket might come from a connection that is being shut down
+ * by the multi's conncache. */
+ Curl_conncache_multi_socket(multi, s, ev_bitmask);
+ }
else {
struct Curl_hash_iterator iter;
struct Curl_hash_element *he;
@@ -3232,18 +3235,18 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
DEBUGASSERT(data->magic == CURLEASY_MAGIC_NUMBER);
if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK))
- /* set socket event bitmask if they're not locked */
+ /* set socket event bitmask if they are not locked */
data->state.select_bits |= (unsigned char)ev_bitmask;
Curl_expire(data, 0, EXPIRE_RUN_NOW);
}
- /* Now we fall-through and do the timer-based stuff, since we don't want
+ /* Now we fall-through and do the timer-based stuff, since we do not want
to force the user to have to deal with timeouts as long as at least
one connection in fact has traffic. */
data = NULL; /* set data to NULL again to avoid calling
- multi_runsingle() in case there's no need to */
+ multi_runsingle() in case there is no need to */
now = Curl_now(); /* get a newer time since the multi_runsingle() loop
may have taken some time */
}
@@ -3286,7 +3289,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
}
}
- /* Check if there's one (more) expired timer to deal with! This function
+ /* Check if there is one (more) expired timer to deal with! This function
extracts a matching node if there is one */
multi->timetree = Curl_splaygetbest(now, multi->timetree, &t);
@@ -3299,7 +3302,7 @@ static CURLMcode multi_socket(struct Curl_multi *multi,
if(first)
sigpipe_restore(&pipe_st);
- *running_handles = multi->num_alive;
+ *running_handles = (int)multi->num_alive;
return result;
}
@@ -3351,6 +3354,9 @@ CURLMcode curl_multi_setopt(struct Curl_multi *multi,
break;
case CURLMOPT_MAX_TOTAL_CONNECTIONS:
multi->max_total_connections = va_arg(param, long);
+ /* for now, let this also decide the max number of connections
+ * in shutdown handling */
+ multi->max_shutdown_connections = va_arg(param, long);
break;
/* options formerly used for pipelining */
case CURLMOPT_MAX_PIPELINE_LENGTH:
@@ -3437,7 +3443,7 @@ static CURLMcode multi_timeout(struct Curl_multi *multi,
if(Curl_splaycomparekeys(multi->timetree->key, now) > 0) {
/* some time left before expiration */
timediff_t diff = Curl_timediff_ceil(multi->timetree->key, now);
- /* this should be safe even on 32 bit archs, as we don't use that
+ /* this should be safe even on 32-bit archs, as we do not use that
overly long timeouts */
*timeout_ms = (long)diff;
}
@@ -3482,7 +3488,7 @@ CURLMcode Curl_update_timer(struct Curl_multi *multi)
static const struct curltime none = {0, 0};
if(Curl_splaycomparekeys(none, multi->timer_lastcall)) {
multi->timer_lastcall = none;
- /* there's no timeout now but there was one previously, tell the app to
+ /* there is no timeout now but there was one previously, tell the app to
disable it */
set_in_callback(multi, TRUE);
rc = multi->timer_cb(multi, -1, multi->timer_userp);
@@ -3603,8 +3609,8 @@ void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id)
DEBUGASSERT(id < EXPIRE_LAST);
set = Curl_now();
- set.tv_sec += (time_t)(milli/1000); /* might be a 64 to 32 bit conversion */
- set.tv_usec += (unsigned int)(milli%1000)*1000;
+ set.tv_sec += (time_t)(milli/1000); /* might be a 64 to 32 bits conversion */
+ set.tv_usec += (int)(milli%1000)*1000;
if(set.tv_usec >= 1000000) {
set.tv_sec++;
@@ -3614,7 +3620,7 @@ void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id)
/* Remove any timer with the same id just in case. */
multi_deltimeout(data, id);
- /* Add it to the timer list. It must stay in the list until it has expired
+ /* Add it to the timer list. It must stay in the list until it has expired
in case we need to recompute the minimum timer later. */
multi_addtimeout(data, &set, id);
@@ -3627,7 +3633,7 @@ void Curl_expire(struct Curl_easy *data, timediff_t milli, expire_id id)
if(diff > 0) {
/* The current splay tree entry is sooner than this new expiry time.
- We don't need to update our splay tree entry. */
+ We do not need to update our splay tree entry. */
return;
}
@@ -3718,12 +3724,12 @@ CURLMcode curl_multi_assign(struct Curl_multi *multi, curl_socket_t s,
size_t Curl_multi_max_host_connections(struct Curl_multi *multi)
{
- return multi ? multi->max_host_connections : 0;
+ return multi ? (size_t)multi->max_host_connections : 0;
}
size_t Curl_multi_max_total_connections(struct Curl_multi *multi)
{
- return multi ? multi->max_total_connections : 0;
+ return multi ? (size_t)multi->max_total_connections : 0;
}
/*
diff --git a/libs/libcurl/src/multihandle.h b/libs/libcurl/src/multihandle.h
index 44b275858b..a19c399af7 100644
--- a/libs/libcurl/src/multihandle.h
+++ b/libs/libcurl/src/multihandle.h
@@ -80,10 +80,6 @@ typedef enum {
/* value for MAXIMUM CONCURRENT STREAMS upper limit */
#define INITIAL_MAX_CONCURRENT_STREAMS ((1U << 31) - 1)
-/* Curl_multi SSL backend-specific data; declared differently by each SSL
- backend */
-struct multi_ssl_backend_data;
-
/* This is the struct known as CURLM on the outside */
struct Curl_multi {
/* First a simple identifier to easier detect if a user mix up
@@ -132,14 +128,17 @@ struct Curl_multi {
char *xfer_ulbuf; /* the actual buffer */
size_t xfer_ulbuf_len; /* the allocated length */
-#if defined(USE_SSL)
- struct multi_ssl_backend_data *ssl_backend_data;
-#endif
-
/* 'sockhash' is the lookup hash for socket descriptor => easy handles (note
the pluralis form, there can be more than one easy handle waiting on the
same actual socket) */
struct Curl_hash sockhash;
+ /* `proto_hash` is a general key-value store for protocol implementations
+ * with the lifetime of the multi handle. The number of elements kept here
+ * should be on the order of supported protocols (and sub-protocols like
+ * TLS), *not* on the order of connections or current transfers!
+ * Elements need to be added with their own destructor to be invoked when
+ * the multi handle is cleaned up (see Curl_hash_add2()). */
+ struct Curl_hash proto_hash;
/* Shared connection cache (bundles)*/
struct conncache conn_cache;
@@ -149,6 +148,8 @@ struct Curl_multi {
long max_total_connections; /* if >0, a fixed limit of the maximum number
of connections in total */
+ long max_shutdown_connections; /* if >0, a fixed limit of the maximum number
+ of connections in shutdown handling */
/* timer callback and user data pointer for the *socket() API */
curl_multi_timer_callback timer_cb;
@@ -159,13 +160,14 @@ struct Curl_multi {
WSAEVENT wsa_event; /* winsock event used for waits */
#else
#ifdef ENABLE_WAKEUP
- curl_socket_t wakeup_pair[2]; /* pipe()/socketpair() used for wakeup
- 0 is used for read, 1 is used for write */
+ curl_socket_t wakeup_pair[2]; /* eventfd()/pipe()/socketpair() used for
+ wakeup; 0 is used for read, 1 is used
+ for write */
#endif
#endif
unsigned int max_concurrent_streams;
unsigned int maxconnects; /* if >0, a fixed limit of the maximum number of
- entries we're allowed to grow the connection
+ entries we are allowed to grow the connection
cache to */
#define IPV6_UNKNOWN 0
#define IPV6_DEAD 1
diff --git a/libs/libcurl/src/multiif.h b/libs/libcurl/src/multiif.h
index ee2b599df2..b528d8b56f 100644
--- a/libs/libcurl/src/multiif.h
+++ b/libs/libcurl/src/multiif.h
@@ -76,7 +76,7 @@ void Curl_multiuse_state(struct Curl_easy *data,
* Curl_multi_closed()
*
* Used by the connect code to tell the multi_socket code that one of the
- * sockets we were using is about to be closed. This function will then
+ * sockets we were using is about to be closed. This function will then
* remove it from the sockethash for this handle to make the multi_socket API
* behave properly, especially for the case when libcurl will create another
* socket again and it gets the same file descriptor number.
@@ -84,6 +84,15 @@ void Curl_multiuse_state(struct Curl_easy *data,
void Curl_multi_closed(struct Curl_easy *data, curl_socket_t s);
+/* Compare the two pollsets to notify the multi_socket API of changes
+ * in socket polling, e.g. calling multi->socket_cb() with the changes if
+ * differences are seen.
+ */
+CURLMcode Curl_multi_pollset_ev(struct Curl_multi *multi,
+ struct Curl_easy *data,
+ struct easy_pollset *ps,
+ struct easy_pollset *last_ps);
+
/*
* Add a handle and move it into PERFORM state at once. For pushed streams.
*/
diff --git a/libs/libcurl/src/netrc.c b/libs/libcurl/src/netrc.c
index 700e085cf0..c0c7840620 100644
--- a/libs/libcurl/src/netrc.c
+++ b/libs/libcurl/src/netrc.c
@@ -237,7 +237,7 @@ static int parsenetrc(const char *host,
else if(strcasecompare("password", tok))
state_password = 1;
else if(strcasecompare("machine", tok)) {
- /* ok, there's machine here go => */
+ /* ok, there is machine here go => */
state = HOSTFOUND;
state_our_login = FALSE;
}
@@ -277,7 +277,7 @@ out:
/*
* @unittest: 1304
*
- * *loginp and *passwordp MUST be allocated if they aren't NULL when passed
+ * *loginp and *passwordp MUST be allocated if they are not NULL when passed
* in.
*/
int Curl_parsenetrc(const char *host, char **loginp, char **passwordp,
diff --git a/libs/libcurl/src/netrc.h b/libs/libcurl/src/netrc.h
index 362cd79343..14d69224c6 100644
--- a/libs/libcurl/src/netrc.h
+++ b/libs/libcurl/src/netrc.h
@@ -27,7 +27,7 @@
#include "curl_setup.h"
#ifndef CURL_DISABLE_NETRC
-/* returns -1 on failure, 0 if the host is found, 1 is the host isn't found */
+/* returns -1 on failure, 0 if the host is found, 1 if the host is not found */
int Curl_parsenetrc(const char *host, char **loginp,
char **passwordp, char *filename);
/* Assume: (*passwordp)[0]=0, host[0] != 0.
diff --git a/libs/libcurl/src/nonblock.c b/libs/libcurl/src/nonblock.c
index d46b5b2659..4b1268aa98 100644
--- a/libs/libcurl/src/nonblock.c
+++ b/libs/libcurl/src/nonblock.c
@@ -50,9 +50,18 @@ int curlx_nonblock(curl_socket_t sockfd, /* operate on this */
/* most recent unix versions */
int flags;
flags = sfcntl(sockfd, F_GETFL, 0);
+ if(flags < 0)
+ return -1;
+ /* Check if the current file status flags have already satisfied
+ * the request; if so, there is no need to call fcntl() to replicate it.
+ */
+ if(!!(flags & O_NONBLOCK) == !!nonblock)
+ return 0;
if(nonblock)
- return sfcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
- return sfcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK));
+ flags |= O_NONBLOCK;
+ else
+ flags &= ~O_NONBLOCK;
+ return sfcntl(sockfd, F_SETFL, flags);
#elif defined(HAVE_IOCTL_FIONBIO)
@@ -64,7 +73,7 @@ int curlx_nonblock(curl_socket_t sockfd, /* operate on this */
/* Windows */
unsigned long flags = nonblock ? 1UL : 0UL;
- return ioctlsocket(sockfd, FIONBIO, &flags);
+ return ioctlsocket(sockfd, (long)FIONBIO, &flags);
#elif defined(HAVE_IOCTLSOCKET_CAMEL_FIONBIO)
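The fcntl() path above now bails out early: it checks F_GETFL for failure and skips F_SETFL entirely when the O_NONBLOCK bit already matches the requested state, saving a syscall on the common path. A minimal standalone sketch of the same pattern (set_nonblocking() is a made-up helper name, not libcurl's):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Only call F_SETFL when the O_NONBLOCK bit actually needs to change. */
int set_nonblocking(int fd, int nonblock)
{
  int flags = fcntl(fd, F_GETFL, 0);
  if(flags < 0)
    return -1;                      /* F_GETFL failed */
  if(!!(flags & O_NONBLOCK) == !!nonblock)
    return 0;                       /* already in the requested state */
  if(nonblock)
    flags |= O_NONBLOCK;
  else
    flags &= ~O_NONBLOCK;
  return fcntl(fd, F_SETFL, flags); /* one syscall, only when needed */
}

int main(void)
{
  if(set_nonblocking(STDIN_FILENO, 1) == 0)
    puts("stdin is now non-blocking");
  return 0;
}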
diff --git a/libs/libcurl/src/noproxy.c b/libs/libcurl/src/noproxy.c
index dae21245aa..8580235b56 100644
--- a/libs/libcurl/src/noproxy.c
+++ b/libs/libcurl/src/noproxy.c
@@ -79,22 +79,22 @@ UNITTEST bool Curl_cidr6_match(const char *ipv6,
unsigned int bits)
{
#ifdef USE_IPV6
- int bytes;
- int rest;
+ unsigned int bytes;
+ unsigned int rest;
unsigned char address[16];
unsigned char check[16];
if(!bits)
bits = 128;
- bytes = bits/8;
+ bytes = bits / 8;
rest = bits & 0x07;
+ if((bytes > 16) || ((bytes == 16) && rest))
+ return FALSE;
if(1 != Curl_inet_pton(AF_INET6, ipv6, address))
return FALSE;
if(1 != Curl_inet_pton(AF_INET6, network, check))
return FALSE;
- if((bytes > 16) || ((bytes == 16) && rest))
- return FALSE;
if(bytes && memcmp(address, check, bytes))
return FALSE;
if(rest && !((address[bytes] ^ check[bytes]) & (0xff << (8 - rest))))
@@ -119,13 +119,12 @@ enum nametype {
* Checks if the host is in the noproxy list. returns TRUE if it matches and
* therefore the proxy should NOT be used.
****************************************************************/
-bool Curl_check_noproxy(const char *name, const char *no_proxy,
- bool *spacesep)
+bool Curl_check_noproxy(const char *name, const char *no_proxy)
{
char hostip[128];
- *spacesep = FALSE;
+
/*
- * If we don't have a hostname at all, like for example with a FILE
+ * If we do not have a hostname at all, like for example with a FILE
* transfer, we have nothing to interrogate the noproxy list with.
*/
if(!name || name[0] == '\0')
@@ -143,7 +142,7 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy,
if(!strcmp("*", no_proxy))
return TRUE;
- /* NO_PROXY was specified and it wasn't just an asterisk */
+ /* NO_PROXY was specified and it was not just an asterisk */
if(name[0] == '[') {
char *endptr;
@@ -166,7 +165,7 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy,
if(1 == Curl_inet_pton(AF_INET, name, &address))
type = TYPE_IPV4;
else {
- /* ignore trailing dots in the host name */
+ /* ignore trailing dots in the hostname */
if(name[namelen - 1] == '.')
namelen--;
}
@@ -232,7 +231,9 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy,
slash = strchr(check, '/');
/* if the slash is part of this token, use it */
if(slash) {
- bits = atoi(slash + 1);
+ /* if the bits variable gets a crazy value here, that is fine as
+ the value will then be rejected in the cidr function */
+ bits = (unsigned int)atoi(slash + 1);
*slash = 0; /* null terminate there */
}
if(type == TYPE_IPV6)
@@ -248,16 +249,14 @@ bool Curl_check_noproxy(const char *name, const char *no_proxy,
/* pass blanks after pattern */
while(ISBLANK(*p))
p++;
- /* if not a comma! */
- if(*p && (*p != ',')) {
- *spacesep = TRUE;
- continue;
- }
+ /* if not a comma, this ends the loop */
+ if(*p != ',')
+ break;
/* pass any number of commas */
while(*p == ',')
p++;
} /* while(*p) */
- } /* NO_PROXY was specified and it wasn't just an asterisk */
+ } /* NO_PROXY was specified and it was not just an asterisk */
return FALSE;
}
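The reordering in Curl_cidr6_match() validates the prefix length before the two Curl_inet_pton() parses, and the byte/bit counters are now unsigned so the bits-to-bytes arithmetic cannot go negative. A minimal sketch of the underlying CIDR comparison, assuming both addresses are already parsed into alen bytes in network byte order (cidr_match_bytes() is a hypothetical helper, not curl's):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Reject an impossible prefix length first, then compare whole bytes,
   then the remaining leading bits of the last partial byte. */
static bool cidr_match_bytes(const unsigned char *addr,
                             const unsigned char *net,
                             unsigned int alen, unsigned int bits)
{
  unsigned int bytes = bits / 8;
  unsigned int rest = bits & 0x07;
  if((bytes > alen) || ((bytes == alen) && rest))
    return false;                       /* prefix longer than the address */
  if(bytes && memcmp(addr, net, bytes))
    return false;                       /* whole-byte part differs */
  if(rest && ((addr[bytes] ^ net[bytes]) & (0xff << (8 - rest))))
    return false;                       /* leading bits of last byte differ */
  return true;
}

int main(void)
{
  unsigned char a[16], n[16];
  if(inet_pton(AF_INET6, "2001:db8::1", a) == 1 &&
     inet_pton(AF_INET6, "2001:db8::", n) == 1)
    printf("match /32: %s\n", cidr_match_bytes(a, n, 16, 32) ? "yes" : "no");
  return 0;
}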
diff --git a/libs/libcurl/src/noproxy.h b/libs/libcurl/src/noproxy.h
index 1cd7f93225..5e1010b608 100644
--- a/libs/libcurl/src/noproxy.h
+++ b/libs/libcurl/src/noproxy.h
@@ -27,7 +27,7 @@
#ifndef CURL_DISABLE_PROXY
-#ifdef DEBUGBUILD
+#ifdef UNITTESTS
UNITTEST bool Curl_cidr4_match(const char *ipv4, /* 1.2.3.4 address */
const char *network, /* 1.2.3.4 address */
@@ -37,9 +37,7 @@ UNITTEST bool Curl_cidr6_match(const char *ipv6,
unsigned int bits);
#endif
-bool Curl_check_noproxy(const char *name, const char *no_proxy,
- bool *spacesep);
-
+bool Curl_check_noproxy(const char *name, const char *no_proxy);
#endif
#endif /* HEADER_CURL_NOPROXY_H */
diff --git a/libs/libcurl/src/openldap.c b/libs/libcurl/src/openldap.c
index 5263d17aec..f992d994fc 100644
--- a/libs/libcurl/src/openldap.c
+++ b/libs/libcurl/src/openldap.c
@@ -921,7 +921,7 @@ static CURLcode oldap_do(struct Curl_easy *data, bool *done)
else {
lr->msgid = msgid;
data->req.p.ldap = lr;
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
*done = TRUE;
}
}
@@ -1152,7 +1152,7 @@ ldapsb_tls_remove(Sockbuf_IO_Desc *sbiod)
return 0;
}
-/* We don't need to do anything because libcurl does it already */
+/* We do not need to do anything because libcurl does it already */
static int
ldapsb_tls_close(Sockbuf_IO_Desc *sbiod)
{
diff --git a/libs/libcurl/src/parsedate.c b/libs/libcurl/src/parsedate.c
index c6ae8fb8e4..41fe9455dd 100644
--- a/libs/libcurl/src/parsedate.c
+++ b/libs/libcurl/src/parsedate.c
@@ -244,7 +244,7 @@ static int checkmonth(const char *check, size_t len)
}
/* return the time zone offset between GMT and the input one, in number
- of seconds or -1 if the timezone wasn't found/legal */
+ of seconds or -1 if the timezone was not found/legal */
static int checktz(const char *check, size_t len)
{
@@ -265,7 +265,7 @@ static int checktz(const char *check, size_t len)
static void skip(const char **date)
{
- /* skip everything that aren't letters or digits */
+ /* skip everything that is not a letter or digit */
while(**date && !ISALNUM(**date))
(*date)++;
}
@@ -277,7 +277,7 @@ enum assume {
};
/*
- * time2epoch: time stamp to seconds since epoch in GMT time zone. Similar to
+ * time2epoch: time stamp to seconds since epoch in GMT time zone. Similar to
* mktime but for GMT only.
*/
static time_t time2epoch(int sec, int min, int hour,
@@ -445,7 +445,7 @@ static int parsedate(const char *date, time_t *output)
((date[-1] == '+' || date[-1] == '-'))) {
/* four digits and a value less than or equal to 1400 (to take into
account all sorts of funny time zone diffs) and it is preceded
- with a plus or minus. This is a time zone indication. 1400 is
+ with a plus or minus. This is a time zone indication. 1400 is
picked since +1300 is frequently used and +1400 is mentioned as
an edge number in the document "ISO C 200X Proposal: Timezone
Functions" at http://david.tribble.com/text/c0xtimezone.html If
@@ -521,13 +521,13 @@ static int parsedate(const char *date, time_t *output)
#if (SIZEOF_TIME_T < 5)
#ifdef HAVE_TIME_T_UNSIGNED
- /* an unsigned 32 bit time_t can only hold dates to 2106 */
+ /* an unsigned 32-bit time_t can only hold dates to 2106 */
if(yearnum > 2105) {
*output = TIME_T_MAX;
return PARSEDATE_LATER;
}
#else
- /* a signed 32 bit time_t can only hold dates to the beginning of 2038 */
+ /* a signed 32-bit time_t can only hold dates to the beginning of 2038 */
if(yearnum > 2037) {
*output = TIME_T_MAX;
return PARSEDATE_LATER;
@@ -549,7 +549,7 @@ static int parsedate(const char *date, time_t *output)
return PARSEDATE_FAIL; /* clearly an illegal date */
/* time2epoch() returns a time_t. time_t is often 32 bits, sometimes even on
- architectures that feature 64 bit 'long' but ultimately time_t is the
+ architectures that feature a 64-bit 'long' but ultimately time_t is the
correct data type to use.
*/
t = time2epoch(secnum, minnum, hournum, mdaynum, monnum, yearnum);
diff --git a/libs/libcurl/src/pingpong.c b/libs/libcurl/src/pingpong.c
index b39a7c75cf..809d519992 100644
--- a/libs/libcurl/src/pingpong.c
+++ b/libs/libcurl/src/pingpong.c
@@ -119,7 +119,7 @@ CURLcode Curl_pp_statemach(struct Curl_easy *data,
interval_ms);
if(block) {
- /* if we didn't wait, we don't have to spend time on this now */
+ /* if we did not wait, we do not have to spend time on this now */
if(Curl_pgrsUpdate(data))
result = CURLE_ABORTED_BY_CALLBACK;
else
@@ -179,7 +179,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data,
DEBUGASSERT(pp->sendthis == NULL);
if(!conn)
- /* can't send without a connection! */
+ /* cannot send without a connection! */
return CURLE_SEND_ERROR;
Curl_dyn_reset(&pp->sendbuf);
@@ -329,7 +329,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data,
char *nl = memchr(line, '\n', Curl_dyn_len(&pp->recvbuf));
if(nl) {
/* a newline is CRLF in pp-talk, so the CR is ignored as
- the line isn't really terminated until the LF comes */
+ the line is not really terminated until the LF comes */
size_t length = nl - line + 1;
/* output debug output if that is requested */
@@ -372,7 +372,7 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data,
break;
}
- } while(1); /* while there's buffer left to scan */
+ } while(1); /* while there is buffer left to scan */
pp->pending_resp = FALSE;
diff --git a/libs/libcurl/src/pingpong.h b/libs/libcurl/src/pingpong.h
index 887d4e8ed9..4b096c250d 100644
--- a/libs/libcurl/src/pingpong.h
+++ b/libs/libcurl/src/pingpong.h
@@ -37,7 +37,7 @@ struct connectdata;
typedef enum {
PPTRANSFER_BODY, /* yes do transfer a body */
PPTRANSFER_INFO, /* do still go through to get info/headers */
- PPTRANSFER_NONE /* don't get anything and don't get info */
+ PPTRANSFER_NONE /* do not get anything and do not get info */
} curl_pp_transfer;
/*
@@ -83,7 +83,7 @@ struct pingpong {
* Curl_pp_statemach()
*
* called repeatedly until done. Set 'wait' to make it wait a while on the
- * socket if there's no traffic.
+ * socket if there is no traffic.
*/
CURLcode Curl_pp_statemach(struct Curl_easy *data, struct pingpong *pp,
bool block, bool disconnecting);
diff --git a/libs/libcurl/src/pop3.c b/libs/libcurl/src/pop3.c
index 2f113fb246..5a9dfbe0d9 100644
--- a/libs/libcurl/src/pop3.c
+++ b/libs/libcurl/src/pop3.c
@@ -406,7 +406,7 @@ static CURLcode pop3_perform_user(struct Curl_easy *data,
CURLcode result = CURLE_OK;
/* Check we have a username and password to authenticate with and end the
- connect phase if we don't */
+ connect phase if we do not */
if(!data->state.aptr.user) {
pop3_state(data, POP3_STOP);
@@ -440,7 +440,7 @@ static CURLcode pop3_perform_apop(struct Curl_easy *data,
char secret[2 * MD5_DIGEST_LEN + 1];
/* Check we have a username and password to authenticate with and end the
- connect phase if we don't */
+ connect phase if we do not */
if(!data->state.aptr.user) {
pop3_state(data, POP3_STOP);
@@ -550,7 +550,7 @@ static CURLcode pop3_perform_authentication(struct Curl_easy *data,
saslprogress progress = SASL_IDLE;
/* Check we have enough data to authenticate with and end the
- connect phase if we don't */
+ connect phase if we do not */
if(!Curl_sasl_can_authenticate(&pop3c->sasl, data)) {
pop3_state(data, POP3_STOP);
return result;
@@ -758,7 +758,7 @@ static CURLcode pop3_state_capa_resp(struct Curl_easy *data, int pop3code,
}
}
else {
- /* Clear text is supported when CAPA isn't recognised */
+ /* Clear text is supported when CAPA is not recognised */
if(pop3code != '+')
pop3c->authtypes |= POP3_TYPE_CLEARTEXT;
@@ -931,12 +931,12 @@ static CURLcode pop3_state_command_resp(struct Curl_easy *data,
pop3c->eob = 2;
/* But since this initial CR LF pair is not part of the actual body, we set
- the strip counter here so that these bytes won't be delivered. */
+ the strip counter here so that these bytes will not be delivered. */
pop3c->strip = 2;
if(pop3->transfer == PPTRANSFER_BODY) {
/* POP3 download */
- Curl_xfer_setup(data, FIRSTSOCKET, -1, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, FALSE);
if(pp->overflow) {
/* The recv buffer contains data that is actually body content so send
@@ -1477,7 +1477,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
pop3c->eob++;
if(i) {
- /* Write out the body part that didn't match */
+ /* Write out the body part that did not match */
result = Curl_client_write(data, CLIENTWRITE_BODY, &str[last],
i - last);
@@ -1490,7 +1490,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
else if(pop3c->eob == 3)
pop3c->eob++;
else
- /* If the character match wasn't at position 0 or 3 then restart the
+ /* If the character match was not at position 0 or 3 then restart the
pattern matching */
pop3c->eob = 1;
break;
@@ -1499,7 +1499,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
if(pop3c->eob == 1 || pop3c->eob == 4)
pop3c->eob++;
else
- /* If the character match wasn't at position 1 or 4 then start the
+ /* If the character match was not at position 1 or 4 then start the
search again */
pop3c->eob = 0;
break;
@@ -1513,7 +1513,7 @@ CURLcode Curl_pop3_write(struct Curl_easy *data, const char *str, size_t nread)
pop3c->eob = 0;
}
else
- /* If the character match wasn't at position 2 then start the search
+ /* If the character match was not at position 2 then start the search
again */
pop3c->eob = 0;
break;
diff --git a/libs/libcurl/src/progress.c b/libs/libcurl/src/progress.c
index f15657155e..4e6d71f9b7 100644
--- a/libs/libcurl/src/progress.c
+++ b/libs/libcurl/src/progress.c
@@ -82,13 +82,13 @@ static char *max5data(curl_off_t bytes, char *max5)
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "k", bytes/ONE_KILOBYTE);
else if(bytes < CURL_OFF_T_C(100) * ONE_MEGABYTE)
- /* 'XX.XM' is good as long as we're less than 100 megs */
+ /* 'XX.XM' is good as long as we are less than 100 megs */
msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE,
(bytes%ONE_MEGABYTE) / (ONE_MEGABYTE/CURL_OFF_T_C(10)) );
else if(bytes < CURL_OFF_T_C(10000) * ONE_MEGABYTE)
- /* 'XXXXM' is good until we're at 10000MB or above */
+ /* 'XXXXM' is good until we are at 10000MB or above */
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);
else if(bytes < CURL_OFF_T_C(100) * ONE_GIGABYTE)
@@ -109,7 +109,7 @@ static char *max5data(curl_off_t bytes, char *max5)
/* up to 10000PB, display without decimal: XXXXP */
msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "P", bytes/ONE_PETABYTE);
- /* 16384 petabytes (16 exabytes) is the maximum a 64 bit unsigned number can
+ /* 16384 petabytes (16 exabytes) is the maximum a 64-bit unsigned number can
hold, but our data type is signed so 8192PB will be the maximum. */
return max5;
@@ -140,7 +140,7 @@ int Curl_pgrsDone(struct Curl_easy *data)
if(!(data->progress.flags & PGRS_HIDE) &&
!data->progress.callback)
- /* only output if we don't use a progress callback and we're not
+ /* only output if we do not use a progress callback and we are not
* hidden */
fprintf(data->set.err, "\n");
@@ -204,7 +204,7 @@ void Curl_pgrsTimeWas(struct Curl_easy *data, timerid timer,
case TIMER_STARTTRANSFER:
delta = &data->progress.t_starttransfer;
/* prevent updating t_starttransfer unless:
- * 1) this is the first time we're setting t_starttransfer
+ * 1) this is the first time we are setting t_starttransfer
* 2) a redirect has occurred since the last time t_starttransfer was set
* This prevents repeated invocations of the function from incorrectly
* changing the t_starttransfer time.
@@ -265,11 +265,11 @@ void Curl_pgrsStartNow(struct Curl_easy *data)
/*
* This is used to handle speed limits, calculating how many milliseconds to
- * wait until we're back under the speed limit, if needed.
+ * wait until we are back under the speed limit, if needed.
*
* The way it works is by having a "starting point" (time & amount of data
* transferred by then) used in the speed computation, to be used instead of
- * the start of the transfer. This starting point is regularly moved as
+ * the start of the transfer. This starting point is regularly moved as
* transfer goes on, to keep getting accurate values (instead of average over
* the entire transfer).
*
@@ -336,7 +336,7 @@ CURLcode Curl_pgrsSetDownloadCounter(struct Curl_easy *data, curl_off_t size)
*/
void Curl_ratelimit(struct Curl_easy *data, struct curltime now)
{
- /* don't set a new stamp unless the time since last update is long enough */
+ /* do not set a new stamp unless the time since last update is long enough */
if(data->set.max_recv_speed) {
if(Curl_timediff(now, data->progress.dl_limit_start) >=
MIN_RATE_LIMIT_PERIOD) {
@@ -399,7 +399,7 @@ static curl_off_t trspeed(curl_off_t size, /* number of bytes */
return CURL_OFF_T_MAX;
}
-/* returns TRUE if it's time to show the progress meter */
+/* returns TRUE if it is time to show the progress meter */
static bool progress_calc(struct Curl_easy *data, struct curltime now)
{
bool timetoshow = FALSE;
@@ -431,10 +431,10 @@ static bool progress_calc(struct Curl_easy *data, struct curltime now)
/* figure out how many index entries of data we have stored in our speeder
array. With N_ENTRIES filled in, we have about N_ENTRIES-1 seconds of
transfer. Imagine, after one second we have filled in two entries,
- after two seconds we've filled in three entries etc. */
+ after two seconds we have filled in three entries etc. */
countindex = ((p->speeder_c >= CURR_TIME)? CURR_TIME:p->speeder_c) - 1;
- /* first of all, we don't do this if there's no counted seconds yet */
+ /* first of all, we do not do this if there are no counted seconds yet */
if(countindex) {
int checkindex;
timediff_t span_ms;
@@ -587,14 +587,12 @@ static void progress_meter(struct Curl_easy *data)
* Curl_pgrsUpdate() returns 0 for success or the value returned by the
* progress callback!
*/
-int Curl_pgrsUpdate(struct Curl_easy *data)
+static int pgrsupdate(struct Curl_easy *data, bool showprogress)
{
- struct curltime now = Curl_now(); /* what time is it */
- bool showprogress = progress_calc(data, now);
if(!(data->progress.flags & PGRS_HIDE)) {
if(data->set.fxferinfo) {
int result;
- /* There's a callback set, call that */
+ /* There is a callback set, call that */
Curl_set_in_callback(data, true);
result = data->set.fxferinfo(data->set.progress_client,
data->progress.size_dl,
@@ -631,3 +629,19 @@ int Curl_pgrsUpdate(struct Curl_easy *data)
return 0;
}
+
+int Curl_pgrsUpdate(struct Curl_easy *data)
+{
+ struct curltime now = Curl_now(); /* what time is it */
+ bool showprogress = progress_calc(data, now);
+ return pgrsupdate(data, showprogress);
+}
+
+/*
+ * Update all progress, do not do progress meter/callbacks.
+ */
+void Curl_pgrsUpdate_nometer(struct Curl_easy *data)
+{
+ struct curltime now = Curl_now(); /* what time is it */
+ (void)progress_calc(data, now);
+}
diff --git a/libs/libcurl/src/progress.h b/libs/libcurl/src/progress.h
index 745dc1d5bb..c2b90146d7 100644
--- a/libs/libcurl/src/progress.h
+++ b/libs/libcurl/src/progress.h
@@ -54,6 +54,8 @@ CURLcode Curl_pgrsSetDownloadCounter(struct Curl_easy *data, curl_off_t size);
void Curl_pgrsSetUploadCounter(struct Curl_easy *data, curl_off_t size);
void Curl_ratelimit(struct Curl_easy *data, struct curltime now);
int Curl_pgrsUpdate(struct Curl_easy *data);
+void Curl_pgrsUpdate_nometer(struct Curl_easy *data);
+
void Curl_pgrsResetTransferSizes(struct Curl_easy *data);
struct curltime Curl_pgrsTime(struct Curl_easy *data, timerid timer);
timediff_t Curl_pgrsLimitWaitTime(curl_off_t cursize,
diff --git a/libs/libcurl/src/rand.c b/libs/libcurl/src/rand.c
index a0865e25b7..772462513f 100644
--- a/libs/libcurl/src/rand.c
+++ b/libs/libcurl/src/rand.c
@@ -48,7 +48,8 @@
#ifdef _WIN32
-#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600 && \
+ !defined(CURL_WINDOWS_APP)
# define HAVE_WIN_BCRYPTGENRANDOM
# include <bcrypt.h>
# ifdef _MSC_VER
@@ -105,7 +106,7 @@ static CURLcode randit(struct Curl_easy *data, unsigned int *rnd)
static unsigned int randseed;
static bool seeded = FALSE;
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
char *force_entropy = getenv("CURL_ENTROPY");
if(force_entropy) {
if(!seeded) {
@@ -150,7 +151,7 @@ static CURLcode randit(struct Curl_easy *data, unsigned int *rnd)
#if defined(RANDOM_FILE) && !defined(_WIN32)
if(!seeded) {
- /* if there's a random file to read a seed from, use it */
+ /* if there is a random file to read a seed from, use it */
int fd = open(RANDOM_FILE, O_RDONLY);
if(fd > -1) {
/* read random data into the randseed variable */
@@ -269,7 +270,7 @@ CURLcode Curl_rand_alnum(struct Curl_easy *data, unsigned char *rnd,
size_t num)
{
CURLcode result = CURLE_OK;
- const int alnumspace = sizeof(alnum) - 1;
+ const unsigned int alnumspace = sizeof(alnum) - 1;
unsigned int r;
DEBUGASSERT(num > 1);
@@ -282,7 +283,7 @@ CURLcode Curl_rand_alnum(struct Curl_easy *data, unsigned char *rnd,
return result;
} while(r >= (UINT_MAX - UINT_MAX % alnumspace));
- *rnd++ = alnum[r % alnumspace];
+ *rnd++ = (unsigned char)alnum[r % alnumspace];
num--;
}
*rnd = 0;
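Curl_rand_alnum() keeps its rejection-sampling loop: a full random word is discarded whenever it falls into the top, partially filled bucket, so the final modulo cannot bias the character choice; the new unsigned type and cast only silence conversion warnings. A self-contained sketch of that idea, using a simple xorshift generator purely as a stand-in for Curl_rand():

#include <limits.h>
#include <stdio.h>

static unsigned int rnd_state = 2463534242u;

static unsigned int get_rand32(void)   /* stand-in RNG, not curl's */
{
  rnd_state ^= rnd_state << 13;
  rnd_state ^= rnd_state >> 17;
  rnd_state ^= rnd_state << 5;
  return rnd_state;
}

/* Draw a full word and reject it if it lands in the biased tail, so that
   r % setsize is uniform over the character set. */
static char pick_from(const char *set, unsigned int setsize)
{
  unsigned int r;
  do {
    r = get_rand32();
  } while(r >= UINT_MAX - UINT_MAX % setsize);
  return set[r % setsize];
}

int main(void)
{
  static const char alnum[] =
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  int i;
  for(i = 0; i < 8; i++)
    putchar(pick_from(alnum, (unsigned int)sizeof(alnum) - 1));
  putchar('\n');
  return 0;
}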
diff --git a/libs/libcurl/src/rename.c b/libs/libcurl/src/rename.c
index 9eb7b41076..a35f6ce76e 100644
--- a/libs/libcurl/src/rename.c
+++ b/libs/libcurl/src/rename.c
@@ -41,7 +41,7 @@
int Curl_rename(const char *oldpath, const char *newpath)
{
#ifdef _WIN32
- /* rename() on Windows doesn't overwrite, so we can't use it here.
+ /* rename() on Windows does not overwrite, so we cannot use it here.
MoveFileEx() will overwrite and is usually atomic, however it fails
when there are open handles to the file. */
const int max_wait_ms = 1000;
diff --git a/libs/libcurl/src/request.c b/libs/libcurl/src/request.c
index 54b43e12ce..45812c113a 100644
--- a/libs/libcurl/src/request.c
+++ b/libs/libcurl/src/request.c
@@ -54,6 +54,7 @@ CURLcode Curl_req_soft_reset(struct SingleRequest *req,
req->upload_done = FALSE;
req->download_done = FALSE;
req->ignorebody = FALSE;
+ req->shutdown = FALSE;
req->bytecount = 0;
req->writebytecount = 0;
req->header = TRUE; /* assume header */
@@ -108,17 +109,14 @@ void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
/* This is a bit ugly. `req->p` is a union and we assume we can
* free this safely without leaks. */
- Curl_safefree(req->p.http);
+ Curl_safefree(req->p.ftp);
Curl_safefree(req->newurl);
Curl_client_reset(data);
if(req->sendbuf_init)
Curl_bufq_reset(&req->sendbuf);
#ifndef CURL_DISABLE_DOH
- if(req->doh) {
- Curl_close(&req->doh->probe[0].easy);
- Curl_close(&req->doh->probe[1].easy);
- }
+ Curl_doh_close(data);
#endif
/* Can no longer memset() this struct as we need to keep some state */
req->size = -1;
@@ -135,7 +133,6 @@ void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
req->keepon = 0;
req->upgr101 = UPGR101_INIT;
req->timeofdoc = 0;
- req->bodywrites = 0;
req->location = NULL;
req->newurl = NULL;
#ifndef CURL_DISABLE_COOKIES
@@ -156,27 +153,24 @@ void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
req->getheader = FALSE;
req->no_body = data->set.opt_no_body;
req->authneg = FALSE;
+ req->shutdown = FALSE;
+#ifdef USE_HYPER
+ req->bodywritten = FALSE;
+#endif
}
void Curl_req_free(struct SingleRequest *req, struct Curl_easy *data)
{
/* This is a bit ugly. `req->p` is a union and we assume we can
* free this safely without leaks. */
- Curl_safefree(req->p.http);
+ Curl_safefree(req->p.ftp);
Curl_safefree(req->newurl);
if(req->sendbuf_init)
Curl_bufq_free(&req->sendbuf);
Curl_client_cleanup(data);
#ifndef CURL_DISABLE_DOH
- if(req->doh) {
- Curl_close(&req->doh->probe[0].easy);
- Curl_close(&req->doh->probe[1].easy);
- Curl_dyn_free(&req->doh->probe[0].serverdoh);
- Curl_dyn_free(&req->doh->probe[1].serverdoh);
- curl_slist_free_all(req->doh->headers);
- Curl_safefree(req->doh);
- }
+ Curl_doh_cleanup(data);
#endif
}
@@ -187,7 +181,7 @@ static CURLcode xfer_send(struct Curl_easy *data,
CURLcode result = CURLE_OK;
*pnwritten = 0;
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
{
/* Allow debug builds to override this logic to force short initial
sends
@@ -200,7 +194,7 @@ static CURLcode xfer_send(struct Curl_easy *data,
}
}
#endif
- /* Make sure this doesn't send more body bytes than what the max send
+ /* Make sure this does not send more body bytes than what the max send
speed says. The headers do not count to the max speed. */
if(data->set.max_send_speed) {
size_t body_bytes = blen - hds_len;
@@ -251,7 +245,7 @@ static CURLcode req_set_upload_done(struct Curl_easy *data)
{
DEBUGASSERT(!data->req.upload_done);
data->req.upload_done = TRUE;
- data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we're done sending */
+ data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we are done sending */
Curl_creader_done(data, data->req.upload_aborted);
@@ -291,6 +285,14 @@ static CURLcode req_flush(struct Curl_easy *data)
if(!data->req.upload_done && data->req.eos_read &&
Curl_bufq_is_empty(&data->req.sendbuf)) {
+ if(data->req.shutdown) {
+ bool done;
+ result = Curl_xfer_send_shutdown(data, &done);
+ if(result)
+ return result;
+ if(!done)
+ return CURLE_AGAIN;
+ }
return req_set_upload_done(data);
}
return CURLE_OK;
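The new shutdown bit lets req_flush() run a connection send-shutdown once the upload buffer has drained and end-of-stream has been read, returning CURLE_AGAIN until Curl_xfer_send_shutdown() reports completion and only then marking the upload done. Below is a toy, self-contained model of that ordering; none of the names are curl's, and a real shutdown (TLS close_notify plus TCP FIN) is what makes the repeated attempts necessary.

#include <stdio.h>

enum flush_result { FLUSH_OK, FLUSH_AGAIN, FLUSH_ERROR };

struct upload {
  int buffered;        /* bytes still sitting in the send buffer */
  int eos_read;        /* reader delivered end-of-stream */
  int want_shutdown;   /* request end should shut the connection down */
  int shutdown_steps;  /* steps still needed until shutdown completes */
  int done;            /* upload marked finished */
};

static enum flush_result flush_step(struct upload *u)
{
  if(u->buffered > 0) {
    u->buffered--;               /* pretend we sent some bytes */
    return FLUSH_OK;
  }
  if(!u->eos_read)
    return FLUSH_OK;             /* more request body may still arrive */
  if(u->want_shutdown && u->shutdown_steps > 0) {
    u->shutdown_steps--;         /* one shutdown attempt per call */
    return FLUSH_AGAIN;          /* like CURLE_AGAIN: call again later */
  }
  u->done = 1;                   /* only now is the upload done */
  return FLUSH_OK;
}

int main(void)
{
  struct upload u = {2, 1, 1, 2, 0};
  while(!u.done)
    (void)flush_step(&u);
  puts("upload finished after buffered data and shutdown completed");
  return 0;
}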
diff --git a/libs/libcurl/src/request.h b/libs/libcurl/src/request.h
index 570ab4f10c..ba4964b724 100644
--- a/libs/libcurl/src/request.h
+++ b/libs/libcurl/src/request.h
@@ -51,10 +51,10 @@ enum upgrade101 {
/*
- * Request specific data in the easy handle (Curl_easy). Previously,
+ * Request specific data in the easy handle (Curl_easy). Previously,
* these members were on the connectdata struct but since a conn struct may
* now be shared between different Curl_easys, we store connection-specific
- * data here. This struct only keeps stuff that's interesting for *this*
+ * data here. This struct only keeps stuff that is interesting for *this*
* request, as it will be cleared between multiple ones
*/
struct SingleRequest {
@@ -68,7 +68,7 @@ struct SingleRequest {
unsigned int headerbytecount; /* received server headers (not CONNECT
headers) */
unsigned int allheadercount; /* all received headers (server + CONNECT) */
- unsigned int deductheadercount; /* this amount of bytes doesn't count when
+ unsigned int deductheadercount; /* this amount of bytes does not count when
we check if anything has been transferred
at the end of a connection. We use this
counter to make only a 100 reply (without
@@ -93,7 +93,6 @@ struct SingleRequest {
struct bufq sendbuf; /* data which needs to be send to the server */
size_t sendbuf_hds_len; /* amount of header bytes in sendbuf */
time_t timeofdoc;
- long bodywrites;
char *location; /* This points to an allocated version of the Location:
header data */
char *newurl; /* Set to the new URL to use when a redirect or a retry is
@@ -104,7 +103,6 @@ struct SingleRequest {
union {
struct FILEPROTO *file;
struct FTP *ftp;
- struct HTTP *http;
struct IMAP *imap;
struct ldapreqinfo *ldap;
struct MQTT *mqtt;
@@ -147,6 +145,10 @@ struct SingleRequest {
but it is not the final request in the auth
negotiation. */
BIT(sendbuf_init); /* sendbuf is initialized */
+ BIT(shutdown); /* request end will shutdown connection */
+#ifdef USE_HYPER
+ BIT(bodywritten);
+#endif
};
/**
diff --git a/libs/libcurl/src/rtsp.c b/libs/libcurl/src/rtsp.c
index b1aad90c75..bf29c34cfb 100644
--- a/libs/libcurl/src/rtsp.c
+++ b/libs/libcurl/src/rtsp.c
@@ -79,7 +79,7 @@ static unsigned int rtsp_conncheck(struct Curl_easy *data,
unsigned int checks_to_perform);
/* this returns the socket to wait for in the DO and DOING state for the multi
- interface and then we're always _sending_ a request and thus we wait for
+ interface and then we are always _sending_ a request and thus we wait for
the single socket to become writable only */
static int rtsp_getsock_do(struct Curl_easy *data, struct connectdata *conn,
curl_socket_t *socks)
@@ -261,7 +261,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
* Since all RTSP requests are included here, there is no need to
* support custom requests like HTTP.
**/
- data->req.no_body = TRUE; /* most requests don't contain a body */
+ data->req.no_body = TRUE; /* most requests do not contain a body */
switch(rtspreq) {
default:
failf(data, "Got invalid RTSP request");
@@ -310,13 +310,15 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
}
if(rtspreq == RTSPREQ_RECEIVE) {
- Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, -1, TRUE);
goto out;
}
p_session_id = data->set.str[STRING_RTSP_SESSION_ID];
if(!p_session_id &&
- (rtspreq & ~(RTSPREQ_OPTIONS | RTSPREQ_DESCRIBE | RTSPREQ_SETUP))) {
+ (rtspreq & ~(Curl_RtspReq)(RTSPREQ_OPTIONS |
+ RTSPREQ_DESCRIBE |
+ RTSPREQ_SETUP))) {
failf(data, "Refusing to issue an RTSP request [%s] without a session ID.",
p_request);
result = CURLE_BAD_FUNCTION_ARGUMENT;
@@ -576,7 +578,7 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
if(result)
goto out;
- Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SENDRECV, -1, TRUE);
/* issue the request */
result = Curl_req_send(data, &req_buffer);
@@ -950,7 +952,7 @@ CURLcode Curl_rtsp_parseheader(struct Curl_easy *data, const char *header)
/* Find the end of Session ID
*
* Allow any non whitespace content, up to the field separator or end of
- * line. RFC 2326 isn't 100% clear on the session ID and for example
+ * line. RFC 2326 is not 100% clear on the session ID and for example
* gstreamer does url-encoded session ID's not covered by the standard.
*/
end = start;
diff --git a/libs/libcurl/src/rtsp.h b/libs/libcurl/src/rtsp.h
index 0b841765cf..27e04a0992 100644
--- a/libs/libcurl/src/rtsp.h
+++ b/libs/libcurl/src/rtsp.h
@@ -62,16 +62,6 @@ struct rtsp_conn {
* RTSP unique setup
***************************************************************************/
struct RTSP {
- /*
- * http_wrapper MUST be the first element of this structure for the wrap
- * logic to work. In this way, we get a cheap polymorphism because
- * &(data->state.proto.rtsp) == &(data->state.proto.http) per the C spec
- *
- * HTTP functions can safely treat this as an HTTP struct, but RTSP aware
- * functions can also index into the later elements.
- */
- struct HTTP http_wrapper; /* wrap HTTP to do the heavy lifting */
-
long CSeq_sent; /* CSeq of this request */
long CSeq_recv; /* CSeq received */
};
diff --git a/libs/libcurl/src/select.c b/libs/libcurl/src/select.c
index 64ff9c2eed..a016bcc185 100644
--- a/libs/libcurl/src/select.c
+++ b/libs/libcurl/src/select.c
@@ -33,7 +33,7 @@
#endif
#if !defined(HAVE_SELECT) && !defined(HAVE_POLL_FINE)
-#error "We can't compile without select() or poll() support."
+#error "We cannot compile without select() or poll() support."
#endif
#ifdef MSDOS
@@ -47,6 +47,10 @@
#include "select.h"
#include "timediff.h"
#include "warnless.h"
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
/*
* Internal function used for waiting a specific amount of ms
@@ -81,7 +85,7 @@ int Curl_wait_ms(timediff_t timeout_ms)
#if TIMEDIFF_T_MAX >= ULONG_MAX
if(timeout_ms >= ULONG_MAX)
timeout_ms = ULONG_MAX-1;
- /* don't use ULONG_MAX, because that is equal to INFINITE */
+ /* do not use ULONG_MAX, because that is equal to INFINITE */
#endif
Sleep((ULONG)timeout_ms);
#else
@@ -131,7 +135,7 @@ static int our_select(curl_socket_t maxfd, /* highest socket number */
struct timeval *ptimeout;
#ifdef USE_WINSOCK
- /* WinSock select() can't handle zero events. See the comment below. */
+ /* WinSock select() cannot handle zero events. See the comment below. */
if((!fds_read || fds_read->fd_count == 0) &&
(!fds_write || fds_write->fd_count == 0) &&
(!fds_err || fds_err->fd_count == 0)) {
@@ -144,14 +148,14 @@ static int our_select(curl_socket_t maxfd, /* highest socket number */
#ifdef USE_WINSOCK
/* WinSock select() must not be called with an fd_set that contains zero
- fd flags, or it will return WSAEINVAL. But, it also can't be called
+ fd flags, or it will return WSAEINVAL. But, it also cannot be called
with no fd_sets at all! From the documentation:
Any two of the parameters, readfds, writefds, or exceptfds, can be
given as null. At least one must be non-null, and any non-null
descriptor set must contain at least one handle to a socket.
- It is unclear why WinSock doesn't just handle this for us instead of
+ It is unclear why WinSock does not just handle this for us instead of
calling this an error. Luckily, with WinSock, we can _also_ ask how
many bits are set on an fd_set. So, let's just check it beforehand.
*/
@@ -169,7 +173,7 @@ static int our_select(curl_socket_t maxfd, /* highest socket number */
/*
* Wait for read or write events on a set of file descriptors. It uses poll()
* when a fine poll() is available, in order to avoid limits with FD_SETSIZE,
- * otherwise select() is used. An error is returned if select() is being used
+ * otherwise select() is used. An error is returned if select() is being used
* and a file descriptor is too large for FD_SETSIZE.
*
* A negative timeout value makes this function wait indefinitely,
@@ -226,7 +230,7 @@ int Curl_socket_check(curl_socket_t readfd0, /* two sockets to read from */
num++;
}
- r = Curl_poll(pfd, num, timeout_ms);
+ r = Curl_poll(pfd, (unsigned int)num, timeout_ms);
if(r <= 0)
return r;
@@ -257,8 +261,8 @@ int Curl_socket_check(curl_socket_t readfd0, /* two sockets to read from */
}
/*
- * This is a wrapper around poll(). If poll() does not exist, then
- * select() is used instead. An error is returned if select() is
+ * This is a wrapper around poll(). If poll() does not exist, then
+ * select() is used instead. An error is returned if select() is
* being used and a file descriptor is too large for FD_SETSIZE.
* A negative timeout value makes this function wait indefinitely,
* unless no valid file descriptor is given, when this happens the
@@ -357,7 +361,7 @@ int Curl_poll(struct pollfd ufds[], unsigned int nfds, timediff_t timeout_ms)
}
/*
- Note also that WinSock ignores the first argument, so we don't worry
+ Note also that WinSock ignores the first argument, so we do not worry
about the fact that maxfd is computed incorrectly with WinSock (since
curl_socket_t is unsigned in such cases and thus -1 is the largest
value).
@@ -401,3 +405,147 @@ int Curl_poll(struct pollfd ufds[], unsigned int nfds, timediff_t timeout_ms)
return r;
}
+
+void Curl_pollfds_init(struct curl_pollfds *cpfds,
+ struct pollfd *static_pfds,
+ unsigned int static_count)
+{
+ DEBUGASSERT(cpfds);
+ memset(cpfds, 0, sizeof(*cpfds));
+ if(static_pfds && static_count) {
+ cpfds->pfds = static_pfds;
+ cpfds->count = static_count;
+ }
+}
+
+void Curl_pollfds_cleanup(struct curl_pollfds *cpfds)
+{
+ DEBUGASSERT(cpfds);
+ if(cpfds->allocated_pfds) {
+ free(cpfds->pfds);
+ }
+ memset(cpfds, 0, sizeof(*cpfds));
+}
+
+static CURLcode cpfds_increase(struct curl_pollfds *cpfds, unsigned int inc)
+{
+ struct pollfd *new_fds;
+ unsigned int new_count = cpfds->count + inc;
+
+ new_fds = calloc(new_count, sizeof(struct pollfd));
+ if(!new_fds)
+ return CURLE_OUT_OF_MEMORY;
+
+ memcpy(new_fds, cpfds->pfds, cpfds->count * sizeof(struct pollfd));
+ if(cpfds->allocated_pfds)
+ free(cpfds->pfds);
+ cpfds->pfds = new_fds;
+ cpfds->count = new_count;
+ cpfds->allocated_pfds = TRUE;
+ return CURLE_OK;
+}
+
+static CURLcode cpfds_add_sock(struct curl_pollfds *cpfds,
+ curl_socket_t sock, short events, bool fold)
+{
+ int i;
+
+ if(fold && cpfds->n <= INT_MAX) {
+ for(i = (int)cpfds->n - 1; i >= 0; --i) {
+ if(sock == cpfds->pfds[i].fd) {
+ cpfds->pfds[i].events |= events;
+ return CURLE_OK;
+ }
+ }
+ }
+ /* not folded, add new entry */
+ if(cpfds->n >= cpfds->count) {
+ if(cpfds_increase(cpfds, 100))
+ return CURLE_OUT_OF_MEMORY;
+ }
+ cpfds->pfds[cpfds->n].fd = sock;
+ cpfds->pfds[cpfds->n].events = events;
+ ++cpfds->n;
+ return CURLE_OK;
+}
+
+CURLcode Curl_pollfds_add_sock(struct curl_pollfds *cpfds,
+ curl_socket_t sock, short events)
+{
+ return cpfds_add_sock(cpfds, sock, events, FALSE);
+}
+
+CURLcode Curl_pollfds_add_ps(struct curl_pollfds *cpfds,
+ struct easy_pollset *ps)
+{
+ size_t i;
+
+ DEBUGASSERT(cpfds);
+ DEBUGASSERT(ps);
+ for(i = 0; i < ps->num; i++) {
+ short events = 0;
+ if(ps->actions[i] & CURL_POLL_IN)
+ events |= POLLIN;
+ if(ps->actions[i] & CURL_POLL_OUT)
+ events |= POLLOUT;
+ if(events) {
+ if(cpfds_add_sock(cpfds, ps->sockets[i], events, TRUE))
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+ return CURLE_OK;
+}
+
+void Curl_waitfds_init(struct curl_waitfds *cwfds,
+ struct curl_waitfd *static_wfds,
+ unsigned int static_count)
+{
+ DEBUGASSERT(cwfds);
+ DEBUGASSERT(static_wfds);
+ memset(cwfds, 0, sizeof(*cwfds));
+ cwfds->wfds = static_wfds;
+ cwfds->count = static_count;
+}
+
+static CURLcode cwfds_add_sock(struct curl_waitfds *cwfds,
+ curl_socket_t sock, short events)
+{
+ int i;
+
+ if(cwfds->n <= INT_MAX) {
+ for(i = (int)cwfds->n - 1; i >= 0; --i) {
+ if(sock == cwfds->wfds[i].fd) {
+ cwfds->wfds[i].events |= events;
+ return CURLE_OK;
+ }
+ }
+ }
+ /* not folded, add new entry */
+ if(cwfds->n >= cwfds->count)
+ return CURLE_OUT_OF_MEMORY;
+ cwfds->wfds[cwfds->n].fd = sock;
+ cwfds->wfds[cwfds->n].events = events;
+ ++cwfds->n;
+ return CURLE_OK;
+}
+
+CURLcode Curl_waitfds_add_ps(struct curl_waitfds *cwfds,
+ struct easy_pollset *ps)
+{
+ size_t i;
+
+ DEBUGASSERT(cwfds);
+ DEBUGASSERT(ps);
+ for(i = 0; i < ps->num; i++) {
+ short events = 0;
+ if(ps->actions[i] & CURL_POLL_IN)
+ events |= CURL_WAIT_POLLIN;
+ if(ps->actions[i] & CURL_POLL_OUT)
+ events |= CURL_WAIT_POLLOUT;
+ if(events) {
+ if(cwfds_add_sock(cwfds, ps->sockets[i], events))
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+ return CURLE_OK;
+}
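The new curl_pollfds helpers accumulate sockets into a pollfd array: they start from a caller-supplied static buffer, grow on the heap only when it overflows, and fold a repeated socket by OR-ing its event bits; curl_waitfds does the same against a fixed-size curl_waitfd array. A simplified standalone rendition of that accumulation pattern (not curl's API), whose resulting pfds/n pair can be handed directly to poll():

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pollset {
  struct pollfd *pfds;
  unsigned int n;          /* entries in use */
  unsigned int count;      /* entries available */
  int allocated;           /* pfds is heap-allocated */
};

static void pollset_init(struct pollset *ps,
                         struct pollfd *static_pfds, unsigned int count)
{
  memset(ps, 0, sizeof(*ps));
  ps->pfds = static_pfds;
  ps->count = count;
}

static int pollset_add(struct pollset *ps, int fd, short events)
{
  unsigned int i;
  for(i = 0; i < ps->n; i++) {
    if(ps->pfds[i].fd == fd) {
      ps->pfds[i].events |= events;   /* fold into existing entry */
      return 0;
    }
  }
  if(ps->n >= ps->count) {            /* grow, copying entries in use */
    unsigned int newcount = ps->count + 16;
    struct pollfd *np = calloc(newcount, sizeof(*np));
    if(!np)
      return -1;
    memcpy(np, ps->pfds, ps->n * sizeof(*np));
    if(ps->allocated)
      free(ps->pfds);
    ps->pfds = np;
    ps->count = newcount;
    ps->allocated = 1;
  }
  ps->pfds[ps->n].fd = fd;
  ps->pfds[ps->n].events = events;
  ps->n++;
  return 0;
}

static void pollset_cleanup(struct pollset *ps)
{
  if(ps->allocated)
    free(ps->pfds);
  memset(ps, 0, sizeof(*ps));
}

int main(void)
{
  struct pollfd stack[2];
  struct pollset ps;
  pollset_init(&ps, stack, 2);
  (void)pollset_add(&ps, 0, POLLIN);
  (void)pollset_add(&ps, 0, POLLOUT);   /* folds into the same entry */
  (void)pollset_add(&ps, 1, POLLOUT);
  printf("%u pollfd entries\n", ps.n);  /* prints 2 */
  pollset_cleanup(&ps);
  return 0;
}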
diff --git a/libs/libcurl/src/select.h b/libs/libcurl/src/select.h
index edd1865cea..6af855f950 100644
--- a/libs/libcurl/src/select.h
+++ b/libs/libcurl/src/select.h
@@ -111,4 +111,37 @@ int Curl_wait_ms(timediff_t timeout_ms);
} while(0)
#endif
+struct curl_pollfds {
+ struct pollfd *pfds;
+ unsigned int n;
+ unsigned int count;
+ BIT(allocated_pfds);
+};
+
+void Curl_pollfds_init(struct curl_pollfds *cpfds,
+ struct pollfd *static_pfds,
+ unsigned int static_count);
+
+void Curl_pollfds_cleanup(struct curl_pollfds *cpfds);
+
+CURLcode Curl_pollfds_add_ps(struct curl_pollfds *cpfds,
+ struct easy_pollset *ps);
+
+CURLcode Curl_pollfds_add_sock(struct curl_pollfds *cpfds,
+ curl_socket_t sock, short events);
+
+struct curl_waitfds {
+ struct curl_waitfd *wfds;
+ unsigned int n;
+ unsigned int count;
+};
+
+void Curl_waitfds_init(struct curl_waitfds *cwfds,
+ struct curl_waitfd *static_wfds,
+ unsigned int static_count);
+
+CURLcode Curl_waitfds_add_ps(struct curl_waitfds *cwfds,
+ struct easy_pollset *ps);
+
+
#endif /* HEADER_CURL_SELECT_H */
diff --git a/libs/libcurl/src/sendf.c b/libs/libcurl/src/sendf.c
index 67b431f645..390907fae8 100644
--- a/libs/libcurl/src/sendf.c
+++ b/libs/libcurl/src/sendf.c
@@ -289,6 +289,13 @@ static CURLcode cw_download_write(struct Curl_easy *data,
if(nwrite == wmax) {
data->req.download_done = TRUE;
}
+
+ if((type & CLIENTWRITE_EOS) && !data->req.no_body &&
+ (data->req.maxdownload > data->req.bytecount)) {
+ failf(data, "end of response with %" CURL_FORMAT_CURL_OFF_T
+ " bytes missing", data->req.maxdownload - data->req.bytecount);
+ return CURLE_PARTIAL_FILE;
+ }
}
/* Error on too large filesize is handled below, after writing
@@ -309,7 +316,9 @@ static CURLcode cw_download_write(struct Curl_easy *data,
}
/* Update stats, write and report progress */
data->req.bytecount += nwrite;
- ++data->req.bodywrites;
+#ifdef USE_HYPER
+ data->req.bodywritten = TRUE;
+#endif
result = Curl_pgrsSetDownloadCounter(data, data->req.bytecount);
if(result)
return result;
@@ -597,6 +606,14 @@ CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
return CURLE_OK;
}
+bool Curl_creader_def_is_paused(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ (void)data;
+ (void)reader;
+ return FALSE;
+}
+
void Curl_creader_def_done(struct Curl_easy *data,
struct Curl_creader *reader, int premature)
{
@@ -615,6 +632,7 @@ struct cr_in_ctx {
BIT(seen_eos);
BIT(errored);
BIT(has_used_cb);
+ BIT(is_paused);
};
static CURLcode cr_in_init(struct Curl_easy *data, struct Curl_creader *reader)
@@ -637,6 +655,8 @@ static CURLcode cr_in_read(struct Curl_easy *data,
struct cr_in_ctx *ctx = reader->ctx;
size_t nread;
+ ctx->is_paused = FALSE;
+
/* Once we have errored, we will return the same error forever */
if(ctx->errored) {
*pnread = 0;
@@ -688,12 +708,14 @@ static CURLcode cr_in_read(struct Curl_easy *data,
case CURL_READFUNC_PAUSE:
if(data->conn->handler->flags & PROTOPT_NONETWORK) {
/* protocols that work without network cannot be paused. This is
- actually only FILE:// just now, and it can't pause since the transfer
- isn't done using the "normal" procedure. */
+ actually only FILE:// just now, and it cannot pause since the transfer
+ is not done using the "normal" procedure. */
failf(data, "Read callback asked for PAUSE when not supported");
return CURLE_READ_ERROR;
}
/* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
+ CURL_TRC_READ(data, "cr_in_read, callback returned CURL_READFUNC_PAUSE");
+ ctx->is_paused = TRUE;
data->req.keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
*pnread = 0;
*peos = FALSE;
@@ -764,7 +786,7 @@ static CURLcode cr_in_resume_from(struct Curl_easy *data,
failf(data, "Could not seek stream");
return CURLE_READ_ERROR;
}
- /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ /* when seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@@ -798,7 +820,7 @@ static CURLcode cr_in_resume_from(struct Curl_easy *data,
return CURLE_PARTIAL_FILE;
}
}
- /* we've passed, proceed as normal */
+ /* we have passed, proceed as normal */
return CURLE_OK;
}
@@ -850,12 +872,28 @@ static CURLcode cr_in_rewind(struct Curl_easy *data,
}
/* no callback set or failure above, makes us fail at once */
- failf(data, "necessary data rewind wasn't possible");
+ failf(data, "necessary data rewind was not possible");
return CURLE_SEND_FAIL_REWIND;
}
return CURLE_OK;
}
+static CURLcode cr_in_unpause(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ (void)data;
+ ctx->is_paused = FALSE;
+ return CURLE_OK;
+}
+
+static bool cr_in_is_paused(struct Curl_easy *data,
+ struct Curl_creader *reader)
+{
+ struct cr_in_ctx *ctx = reader->ctx;
+ (void)data;
+ return ctx->is_paused;
+}
static const struct Curl_crtype cr_in = {
"cr-in",
@@ -866,7 +904,8 @@ static const struct Curl_crtype cr_in = {
cr_in_total_length,
cr_in_resume_from,
cr_in_rewind,
- Curl_creader_def_unpause,
+ cr_in_unpause,
+ cr_in_is_paused,
Curl_creader_def_done,
sizeof(struct cr_in_ctx)
};
@@ -979,13 +1018,19 @@ static CURLcode cr_lc_read(struct Curl_easy *data,
return result;
start = i + 1;
if(!data->set.crlf && (data->state.infilesize != -1)) {
- /* we're here only because FTP is in ASCII mode...
+ /* we are here only because FTP is in ASCII mode...
bump infilesize for the LF we just added */
data->state.infilesize++;
/* comment: this might work for FTP, but in HTTP we could not change
* the content length after having started the request... */
}
}
+
+ if(start < i) { /* leftover */
+ result = Curl_bufq_cwrite(&ctx->buf, buf + start, i - start, &n);
+ if(result)
+ return result;
+ }
}
DEBUGASSERT(!Curl_bufq_is_empty(&ctx->buf));
@@ -1022,6 +1067,7 @@ static const struct Curl_crtype cr_lc = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct cr_lc_ctx)
};
@@ -1195,6 +1241,7 @@ static const struct Curl_crtype cr_null = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct Curl_creader)
};
@@ -1294,6 +1341,7 @@ static const struct Curl_crtype cr_buf = {
cr_buf_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct cr_buf_ctx)
};
@@ -1356,6 +1404,18 @@ CURLcode Curl_creader_unpause(struct Curl_easy *data)
return result;
}
+bool Curl_creader_is_paused(struct Curl_easy *data)
+{
+ struct Curl_creader *reader = data->req.reader_stack;
+
+ while(reader) {
+ if(reader->crt->is_paused(data, reader))
+ return TRUE;
+ reader = reader->next;
+ }
+ return FALSE;
+}
+
void Curl_creader_done(struct Curl_easy *data, int premature)
{
struct Curl_creader *reader = data->req.reader_stack;
diff --git a/libs/libcurl/src/sendf.h b/libs/libcurl/src/sendf.h
index 699760dccc..cda179b920 100644
--- a/libs/libcurl/src/sendf.h
+++ b/libs/libcurl/src/sendf.h
@@ -218,6 +218,7 @@ struct Curl_crtype {
struct Curl_creader *reader, curl_off_t offset);
CURLcode (*rewind)(struct Curl_easy *data, struct Curl_creader *reader);
CURLcode (*unpause)(struct Curl_easy *data, struct Curl_creader *reader);
+ bool (*is_paused)(struct Curl_easy *data, struct Curl_creader *reader);
void (*done)(struct Curl_easy *data,
struct Curl_creader *reader, int premature);
size_t creader_size; /* sizeof() allocated struct Curl_creader */
@@ -268,6 +269,8 @@ CURLcode Curl_creader_def_rewind(struct Curl_easy *data,
struct Curl_creader *reader);
CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
struct Curl_creader *reader);
+bool Curl_creader_def_is_paused(struct Curl_easy *data,
+ struct Curl_creader *reader);
void Curl_creader_def_done(struct Curl_easy *data,
struct Curl_creader *reader, int premature);
@@ -376,6 +379,11 @@ CURLcode Curl_creader_resume_from(struct Curl_easy *data, curl_off_t offset);
CURLcode Curl_creader_unpause(struct Curl_easy *data);
/**
+ * Return TRUE iff any of the installed readers is paused.
+ */
+bool Curl_creader_is_paused(struct Curl_easy *data);
+
+/**
* Tell all client readers that they are done.
*/
void Curl_creader_done(struct Curl_easy *data, int premature);
diff --git a/libs/libcurl/src/setopt.c b/libs/libcurl/src/setopt.c
index db68910187..4956c5674f 100644
--- a/libs/libcurl/src/setopt.c
+++ b/libs/libcurl/src/setopt.c
@@ -139,8 +139,44 @@ static CURLcode setstropt_userpwd(char *option, char **userp, char **passwdp)
return CURLE_OK;
}
+static CURLcode setstropt_interface(
+ char *option, char **devp, char **ifacep, char **hostp)
+{
+ char *dev = NULL;
+ char *iface = NULL;
+ char *host = NULL;
+ size_t len;
+ CURLcode result;
+
+ DEBUGASSERT(devp);
+ DEBUGASSERT(ifacep);
+ DEBUGASSERT(hostp);
+
+ /* Parse the interface details */
+ if(!option || !*option)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ len = strlen(option);
+ if(len > 255)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+
+ result = Curl_parse_interface(option, len, &dev, &iface, &host);
+ if(result)
+ return result;
+
+ free(*devp);
+ *devp = dev;
+
+ free(*ifacep);
+ *ifacep = iface;
+
+ free(*hostp);
+ *hostp = host;
+
+ return CURLE_OK;
+}
+
#define C_SSLVERSION_VALUE(x) (x & 0xffff)
-#define C_SSLVERSION_MAX_VALUE(x) (x & 0xffff0000)
+#define C_SSLVERSION_MAX_VALUE(x) ((unsigned long)x & 0xffff0000)
static CURLcode protocol2num(const char *str, curl_prot_t *val)
{
@@ -203,13 +239,17 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
data->set.dns_cache_timeout = (int)arg;
break;
case CURLOPT_CA_CACHE_TIMEOUT:
- arg = va_arg(param, long);
- if(arg < -1)
- return CURLE_BAD_FUNCTION_ARGUMENT;
- else if(arg > INT_MAX)
- arg = INT_MAX;
+ if(Curl_ssl_supports(data, SSLSUPP_CA_CACHE)) {
+ arg = va_arg(param, long);
+ if(arg < -1)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ else if(arg > INT_MAX)
+ arg = INT_MAX;
- data->set.general_ssl.ca_cache_timeout = (int)arg;
+ data->set.general_ssl.ca_cache_timeout = (int)arg;
+ }
+ else
+ return CURLE_NOT_BUILT_IN;
break;
case CURLOPT_DNS_USE_GLOBAL_CACHE:
/* deprecated */
@@ -312,7 +352,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_FAILONERROR:
/*
- * Don't output the >=400 error code HTML-page, but instead only
+ * Do not output the >=400 error code HTML-page, but instead only
* return error.
*/
data->set.http_fail_on_error = (0 != va_arg(param, long));
@@ -461,7 +501,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
arg = va_arg(param, long);
version = C_SSLVERSION_VALUE(arg);
- version_max = C_SSLVERSION_MAX_VALUE(arg);
+ version_max = (long)C_SSLVERSION_MAX_VALUE(arg);
if(version < CURL_SSLVERSION_DEFAULT ||
version == CURL_SSLVERSION_SSLv2 ||
@@ -582,7 +622,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
*
* If the encoding is set to "" we use an Accept-Encoding header that
* encompasses all the encodings we support.
- * If the encoding is set to NULL we don't send an Accept-Encoding header
+ * If the encoding is set to NULL we do not send an Accept-Encoding header
* and ignore a received Content-Encoding header.
*
*/
@@ -646,7 +686,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_POST:
/* Does this option serve a purpose anymore? Yes it does, when
- CURLOPT_POSTFIELDS isn't used and the POST data is read off the
+ CURLOPT_POSTFIELDS is not used and the POST data is read off the
callback! */
if(va_arg(param, long)) {
data->set.method = HTTPREQ_POST;
@@ -749,7 +789,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
/* general protection against mistakes and abuse */
if(strlen(argptr) > CURL_MAX_INPUT_LENGTH)
return CURLE_BAD_FUNCTION_ARGUMENT;
- /* append the cookie file name to the list of file names, and deal with
+ /* append the cookie filename to the list of filenames, and deal with
them later */
cl = curl_slist_append(data->state.cookielist, argptr);
if(!cl) {
@@ -765,7 +805,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
data->state.cookielist = NULL;
if(!data->share || !data->share->cookies) {
- /* throw away all existing cookies if this isn't a shared cookie
+ /* throw away all existing cookies if this is not a shared cookie
container */
Curl_cookie_clearall(data->cookies);
Curl_cookie_cleanup(data->cookies);
@@ -777,7 +817,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_COOKIEJAR:
/*
- * Set cookie file name to dump all cookies to when we're done.
+ * Set cookie filename to dump all cookies to when we are done.
*/
result = Curl_setstropt(&data->set.str[STRING_COOKIEJAR],
va_arg(param, char *));
@@ -928,7 +968,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_HTTP09_ALLOWED:
- arg = va_arg(param, unsigned long);
+ arg = (long)va_arg(param, unsigned long);
if(arg > 1L)
return CURLE_BAD_FUNCTION_ARGUMENT;
#ifdef USE_HYPER
@@ -1007,7 +1047,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
- /* switch off bits we can't support */
+ /* switch off bits we cannot support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
#endif
@@ -1039,7 +1079,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
result = Curl_setstropt(&data->set.str[STRING_CUSTOMREQUEST],
va_arg(param, char *));
- /* we don't set
+ /* we do not set
data->set.method = HTTPREQ_CUSTOM;
here, we continue as if we were using the already set type
and this just changes the actual request keyword */
@@ -1085,7 +1125,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
auth |= CURLAUTH_DIGEST; /* set standard digest bit */
auth &= ~CURLAUTH_DIGEST_IE; /* unset ie digest bit */
}
- /* switch off bits we can't support */
+ /* switch off bits we cannot support */
#ifndef USE_NTLM
auth &= ~CURLAUTH_NTLM; /* no NTLM support */
#endif
@@ -1115,7 +1155,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Set proxy server:port to use as proxy.
*
* If the proxy is set to "" (and CURLOPT_SOCKS_PROXY is set to "" or NULL)
- * we explicitly say that we don't want to use a proxy
+ * we explicitly say that we do not want to use a proxy
* (even though there might be environment variables saying so).
*
* Setting it to NULL, means no proxy but allows the environment variables
@@ -1129,7 +1169,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
/*
* Set proxy server:port to use as SOCKS proxy.
*
- * If the proxy is set to "" or NULL we explicitly say that we don't want
+ * If the proxy is set to "" or NULL we explicitly say that we do not want
* to use the socks proxy.
*/
result = Curl_setstropt(&data->set.str[STRING_PRE_PROXY],
@@ -1500,7 +1540,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_USERNAME:
/*
- * authentication user name to use in the operation
+ * authentication username to use in the operation
*/
result = Curl_setstropt(&data->set.str[STRING_USERNAME],
va_arg(param, char *));
@@ -1541,7 +1581,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Prefix the HOST with dash (-) to _remove_ the entry from the cache.
*
* This API can remove any entry from the DNS cache, but only entries
- * that aren't actually in use right now will be pruned immediately.
+ * that are not actually in use right now will be pruned immediately.
*/
data->set.resolve = va_arg(param, struct curl_slist *);
data->state.resolve = data->set.resolve;
@@ -1598,7 +1638,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_PROXYUSERNAME:
/*
- * authentication user name to use in the operation
+ * authentication username to use in the operation
*/
result = Curl_setstropt(&data->set.str[STRING_PROXYUSERNAME],
va_arg(param, char *));
@@ -1650,7 +1690,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
*/
data->set.fdebug = va_arg(param, curl_debug_callback);
/*
- * if the callback provided is NULL, it'll use the default callback
+ * if the callback provided is NULL, it will use the default callback
*/
break;
case CURLOPT_DEBUGDATA:
@@ -1723,7 +1763,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_SSLCERT:
/*
- * String that holds file name of the SSL certificate to use
+ * String that holds filename of the SSL certificate to use
*/
result = Curl_setstropt(&data->set.str[STRING_CERT],
va_arg(param, char *));
@@ -1738,7 +1778,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_SSLCERT:
/*
- * String that holds file name of the SSL certificate to use for proxy
+ * String that holds filename of the SSL certificate to use for proxy
*/
result = Curl_setstropt(&data->set.str[STRING_CERT_PROXY],
va_arg(param, char *));
@@ -1769,7 +1809,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#endif
case CURLOPT_SSLKEY:
/*
- * String that holds file name of the SSL key to use
+ * String that holds filename of the SSL key to use
*/
result = Curl_setstropt(&data->set.str[STRING_KEY],
va_arg(param, char *));
@@ -1784,7 +1824,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_SSLKEY:
/*
- * String that holds file name of the SSL key to use for proxy
+ * String that holds filename of the SSL key to use for proxy
*/
result = Curl_setstropt(&data->set.str[STRING_KEY_PROXY],
va_arg(param, char *));
@@ -1877,8 +1917,10 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
* Set what interface or address/hostname to bind the socket to when
* performing an operation and thus what from-IP your connection will use.
*/
- result = Curl_setstropt(&data->set.str[STRING_DEVICE],
- va_arg(param, char *));
+ result = setstropt_interface(va_arg(param, char *),
+ &data->set.str[STRING_DEVICE],
+ &data->set.str[STRING_INTERFACE],
+ &data->set.str[STRING_BINDHOST]);
break;
#ifndef CURL_DISABLE_BINDLOCAL
case CURLOPT_LOCALPORT:
@@ -1939,12 +1981,12 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#endif
case CURLOPT_SSL_VERIFYHOST:
/*
- * Enable verification of the host name in the peer certificate
+ * Enable verification of the hostname in the peer certificate
*/
arg = va_arg(param, long);
/* Obviously people are not reading documentation and too many thought
- this argument took a boolean when it wasn't and misused it.
+ this argument took a boolean when it was not and misused it.
Treat 1 and 2 the same */
data->set.ssl.primary.verifyhost = !!(arg & 3);
@@ -1954,7 +1996,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_DOH
case CURLOPT_DOH_SSL_VERIFYHOST:
/*
- * Enable verification of the host name in the peer certificate for DoH
+ * Enable verification of the hostname in the peer certificate for DoH
*/
arg = va_arg(param, long);
@@ -1965,7 +2007,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_SSL_VERIFYHOST:
/*
- * Enable verification of the host name in the peer certificate for proxy
+ * Enable verification of the hostname in the peer certificate for proxy
*/
arg = va_arg(param, long);
@@ -2046,7 +2088,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_PINNEDPUBLICKEY:
/*
* Set pinned public key for SSL connection.
- * Specify file name of the public key in DER format.
+ * Specify filename of the public key in DER format.
*/
#ifdef USE_SSL
if(Curl_ssl_supports(data, SSLSUPP_PINNEDPUBKEY))
@@ -2060,7 +2102,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_PROXY_PINNEDPUBLICKEY:
/*
* Set pinned public key for SSL connection.
- * Specify file name of the public key in DER format.
+ * Specify filename of the public key in DER format.
*/
#ifdef USE_SSL
if(Curl_ssl_supports(data, SSLSUPP_PINNEDPUBKEY))
@@ -2073,7 +2115,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#endif
case CURLOPT_CAINFO:
/*
- * Set CA info for SSL connection. Specify file name of the CA certificate
+ * Set CA info for SSL connection. Specify filename of the CA certificate
*/
result = Curl_setstropt(&data->set.str[STRING_SSL_CAFILE],
va_arg(param, char *));
@@ -2095,7 +2137,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_CAINFO:
/*
- * Set CA info SSL connection for proxy. Specify file name of the
+ * Set CA info SSL connection for proxy. Specify filename of the
* CA certificate
*/
result = Curl_setstropt(&data->set.str[STRING_SSL_CAFILE_PROXY],
@@ -2148,7 +2190,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#endif
case CURLOPT_CRLFILE:
/*
- * Set CRL file info for SSL connection. Specify file name of the CRL
+ * Set CRL file info for SSL connection. Specify filename of the CRL
* to check certificates revocation
*/
result = Curl_setstropt(&data->set.str[STRING_SSL_CRLFILE],
@@ -2157,7 +2199,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
#ifndef CURL_DISABLE_PROXY
case CURLOPT_PROXY_CRLFILE:
/*
- * Set CRL file info for SSL connection for proxy. Specify file name of the
+ * Set CRL file info for SSL connection for proxy. Specify filename of the
* CRL to check certificates revocation
*/
result = Curl_setstropt(&data->set.str[STRING_SSL_CRLFILE_PROXY],
@@ -2207,7 +2249,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_BUFFERSIZE:
/*
* The application kindly asks for a differently sized receive buffer.
- * If it seems reasonable, we'll use it.
+ * If it seems reasonable, we will use it.
*/
arg = va_arg(param, long);
@@ -2495,16 +2537,17 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
break;
case CURLOPT_SSL_SESSIONID_CACHE:
- data->set.ssl.primary.sessionid = (0 != va_arg(param, long));
+ data->set.ssl.primary.cache_session = (0 != va_arg(param, long));
#ifndef CURL_DISABLE_PROXY
- data->set.proxy_ssl.primary.sessionid = data->set.ssl.primary.sessionid;
+ data->set.proxy_ssl.primary.cache_session =
+ data->set.ssl.primary.cache_session;
#endif
break;
#ifdef USE_SSH
/* we only include SSH options if explicitly built to support SSH */
case CURLOPT_SSH_AUTH_TYPES:
- data->set.ssh_auth_types = (unsigned int)va_arg(param, long);
+ data->set.ssh_auth_types = (int)va_arg(param, long);
break;
case CURLOPT_SSH_PUBLIC_KEYFILE:
@@ -2533,7 +2576,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
case CURLOPT_SSH_KNOWNHOSTS:
/*
- * Store the file name to read known hosts from.
+ * Store the filename to read known hosts from.
*/
result = Curl_setstropt(&data->set.str[STRING_SSH_KNOWNHOSTS],
va_arg(param, char *));
@@ -2587,7 +2630,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
data->set.http_te_skip = (0 == va_arg(param, long));
break;
#else
- return CURLE_NOT_BUILT_IN; /* hyper doesn't support */
+ return CURLE_NOT_BUILT_IN; /* hyper does not support */
#endif
case CURLOPT_HTTP_CONTENT_DECODING:
@@ -2625,7 +2668,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
/*
* Use this scope id when using IPv6
* We always get longs when passed plain numericals so we should check
- * that the value fits into an unsigned 32 bit integer.
+ * that the value fits into an unsigned 32-bit integer.
*/
uarg = va_arg(param, unsigned long);
#if SIZEOF_LONG > 4
@@ -2668,7 +2711,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
}
case CURLOPT_DEFAULT_PROTOCOL:
- /* Set the protocol to use when the URL doesn't include any protocol */
+ /* Set the protocol to use when the URL does not include any protocol */
result = Curl_setstropt(&data->set.str[STRING_DEFAULT_PROTOCOL],
va_arg(param, char *));
break;
@@ -2918,6 +2961,14 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
arg = INT_MAX;
data->set.tcp_keepintvl = (int)arg;
break;
+ case CURLOPT_TCP_KEEPCNT:
+ arg = va_arg(param, long);
+ if(arg < 0)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ else if(arg > INT_MAX)
+ arg = INT_MAX;
+ data->set.tcp_keepcnt = (int)arg;
+ break;
case CURLOPT_TCP_FASTOPEN:
#if defined(CONNECT_DATA_IDEMPOTENT) || defined(MSG_FASTOPEN) || \
defined(TCP_FASTOPEN_CONNECT)
@@ -3049,7 +3100,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
result = Curl_setstropt(&data->set.str[STRING_HSTS], argptr);
if(result)
return result;
- /* this needs to build a list of file names to read from, so that it can
+ /* this needs to build a list of filenames to read from, so that it can
read them later, as we might get a shared HSTS handle to load them
into */
h = curl_slist_append(data->state.hstslist, argptr);
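The setopt.c hunk above adds handling for CURLOPT_TCP_KEEPCNT (negative values rejected, larger values clamped to INT_MAX), alongside the existing keepalive options. Below is a minimal sketch of how an application might drive the new option together with the older keepalive knobs through the public easy API; the URL and the probe values are illustrative only.

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLcode rc;
  CURL *h = curl_easy_init();
  if(!h)
    return 1;

  curl_easy_setopt(h, CURLOPT_URL, "https://example.com/");
  /* enable TCP keepalive probing on the transfer's connection */
  curl_easy_setopt(h, CURLOPT_TCP_KEEPALIVE, 1L);
  curl_easy_setopt(h, CURLOPT_TCP_KEEPIDLE, 120L);  /* seconds before probing */
  curl_easy_setopt(h, CURLOPT_TCP_KEEPINTVL, 60L);  /* seconds between probes */
  curl_easy_setopt(h, CURLOPT_TCP_KEEPCNT, 3L);     /* probe count, added in 8.9.0 */

  rc = curl_easy_perform(h);
  if(rc != CURLE_OK)
    fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(rc));

  curl_easy_cleanup(h);
  return (int)rc;
}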
diff --git a/libs/libcurl/src/setup-os400.h b/libs/libcurl/src/setup-os400.h
index d3f98daaf5..e506b93d89 100644
--- a/libs/libcurl/src/setup-os400.h
+++ b/libs/libcurl/src/setup-os400.h
@@ -46,6 +46,8 @@ typedef unsigned long u_int32_t;
#include <qsoasync.h>
#include <gssapi.h>
+#ifdef BUILDING_LIBCURL
+
extern int Curl_getaddrinfo_a(const char *nodename,
const char *servname,
const struct addrinfo *hints,
@@ -141,4 +143,6 @@ extern int Curl_os400_getsockname(int sd, struct sockaddr *addr, int *addrlen);
#define inflateEnd Curl_os400_inflateEnd
#endif
+#endif /* BUILDING_LIBCURL */
+
#endif /* HEADER_CURL_SETUP_OS400_H */
diff --git a/libs/libcurl/src/setup-vms.h b/libs/libcurl/src/setup-vms.h
index 459331185a..ec857fec27 100644
--- a/libs/libcurl/src/setup-vms.h
+++ b/libs/libcurl/src/setup-vms.h
@@ -144,7 +144,7 @@ static struct passwd *vms_getpwuid(uid_t uid)
{
struct passwd *my_passwd;
-/* Hack needed to support 64 bit builds, decc_getpwnam is 32 bit only */
+/* Hack needed to support 64-bit builds, decc_getpwnam is 32-bit only */
#ifdef __DECC
# if __INITIAL_POINTER_SIZE
__char_ptr32 unix_path;
diff --git a/libs/libcurl/src/setup-win32.h b/libs/libcurl/src/setup-win32.h
index 19f5cf154b..70789ec840 100644
--- a/libs/libcurl/src/setup-win32.h
+++ b/libs/libcurl/src/setup-win32.h
@@ -65,7 +65,7 @@
* Include header files for windows builds before redefining anything.
* Use this preprocessor block only to include or exclude windows.h,
* winsock2.h or ws2tcpip.h. Any other windows thing belongs
- * to any other further and independent block. Under Cygwin things work
+ * to any other further and independent block. Under Cygwin things work
* just as under linux (e.g. <sys/socket.h>) and the winsock headers should
* never be included when __CYGWIN__ is defined.
*/
@@ -78,7 +78,7 @@
# error "_UNICODE is defined but UNICODE is not defined"
# endif
/*
- * Don't include unneeded stuff in Windows headers to avoid compiler
+ * Do not include unneeded stuff in Windows headers to avoid compiler
* warnings and macro clashes.
* Make sure to define this macro before including any Windows headers.
*/
diff --git a/libs/libcurl/src/sha256.c b/libs/libcurl/src/sha256.c
index 24b1a7a76b..f7f7649477 100644
--- a/libs/libcurl/src/sha256.c
+++ b/libs/libcurl/src/sha256.c
@@ -334,14 +334,14 @@ static const unsigned long K[64] = {
#define RORc(x, y) \
(((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y) & 31)) | \
((unsigned long)(x) << (unsigned long)(32 - ((y) & 31)))) & 0xFFFFFFFFUL)
-#define Ch(x,y,z) (z ^ (x & (y ^ z)))
-#define Maj(x,y,z) (((x | y) & z) | (x & y))
-#define S(x, n) RORc((x), (n))
-#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
-#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
-#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
-#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
-#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+#define Sha256_Ch(x,y,z) (z ^ (x & (y ^ z)))
+#define Sha256_Maj(x,y,z) (((x | y) & z) | (x & y))
+#define Sha256_S(x, n) RORc((x), (n))
+#define Sha256_R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
+#define Sigma0(x) (Sha256_S(x, 2) ^ Sha256_S(x, 13) ^ Sha256_S(x, 22))
+#define Sigma1(x) (Sha256_S(x, 6) ^ Sha256_S(x, 11) ^ Sha256_S(x, 25))
+#define Gamma0(x) (Sha256_S(x, 7) ^ Sha256_S(x, 18) ^ Sha256_R(x, 3))
+#define Gamma1(x) (Sha256_S(x, 17) ^ Sha256_S(x, 19) ^ Sha256_R(x, 10))
/* Compress 512-bits */
static int sha256_compress(struct sha256_state *md,
@@ -364,12 +364,12 @@ static int sha256_compress(struct sha256_state *md,
}
/* Compress */
-#define RND(a,b,c,d,e,f,g,h,i) \
- do { \
- unsigned long t0 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i]; \
- unsigned long t1 = Sigma0(a) + Maj(a, b, c); \
- d += t0; \
- h = t0 + t1; \
+#define RND(a,b,c,d,e,f,g,h,i) \
+ do { \
+ unsigned long t0 = h + Sigma1(e) + Sha256_Ch(e, f, g) + K[i] + W[i]; \
+ unsigned long t1 = Sigma0(a) + Sha256_Maj(a, b, c); \
+ d += t0; \
+ h = t0 + t1; \
} while(0)
for(i = 0; i < 64; ++i) {
@@ -467,7 +467,7 @@ static int my_sha256_final(unsigned char *out,
md->buf[md->curlen++] = (unsigned char)0x80;
/* If the length is currently above 56 bytes we append zeros
- * then compress. Then we can fall back to padding zeros and length
+ * then compress. Then we can fall back to padding zeros and length
* encoding like normal.
*/
if(md->curlen > 56) {
@@ -542,4 +542,4 @@ const struct HMAC_params Curl_HMAC_SHA256[] = {
};
-#endif /* AWS, DIGEST, or libSSH2 */
+#endif /* AWS, DIGEST, or libssh2 */
diff --git a/libs/libcurl/src/share.c b/libs/libcurl/src/share.c
index 96accead9a..d233ffa116 100644
--- a/libs/libcurl/src/share.c
+++ b/libs/libcurl/src/share.c
@@ -26,6 +26,7 @@
#include <curl/curl.h>
#include "urldata.h"
+#include "connect.h"
#include "share.h"
#include "psl.h"
#include "vtls/vtls.h"
@@ -64,7 +65,7 @@ curl_share_setopt(struct Curl_share *share, CURLSHoption option, ...)
return CURLSHE_INVALID;
if(share->dirty)
- /* don't allow setting options while one or more handles are already
+ /* do not allow setting options while one or more handles are already
using this share */
return CURLSHE_IN_USE;
@@ -119,7 +120,7 @@ curl_share_setopt(struct Curl_share *share, CURLSHoption option, ...)
break;
case CURL_LOCK_DATA_CONNECT:
- if(Curl_conncache_init(&share->conn_cache, 103))
+ if(Curl_conncache_init(&share->conn_cache, NULL, 103))
res = CURLSHE_NOMEM;
break;
@@ -268,7 +269,7 @@ Curl_share_lock(struct Curl_easy *data, curl_lock_data type,
if(share->lockfunc) /* only call this if set! */
share->lockfunc(data, type, accesstype, share->clientdata);
}
- /* else if we don't share this, pretend successful lock */
+ /* else if we do not share this, pretend successful lock */
return CURLSHE_OK;
}
diff --git a/libs/libcurl/src/share.h b/libs/libcurl/src/share.h
index e3e130daed..6eaf0a84ae 100644
--- a/libs/libcurl/src/share.h
+++ b/libs/libcurl/src/share.h
@@ -34,7 +34,7 @@
#define CURL_GOOD_SHARE 0x7e117a1e
#define GOOD_SHARE_HANDLE(x) ((x) && (x)->magic == CURL_GOOD_SHARE)
-/* this struct is libcurl-private, don't export details */
+/* this struct is libcurl-private, do not export details */
struct Curl_share {
unsigned int magic; /* CURL_GOOD_SHARE */
unsigned int specifier;
diff --git a/libs/libcurl/src/smb.c b/libs/libcurl/src/smb.c
index a927c7dba3..27d5a8d325 100644
--- a/libs/libcurl/src/smb.c
+++ b/libs/libcurl/src/smb.c
@@ -559,7 +559,7 @@ static void smb_format_message(struct Curl_easy *data, struct smb_header *h,
h->flags2 = smb_swap16(SMB_FLAGS2_IS_LONG_NAME | SMB_FLAGS2_KNOWS_LONG_NAME);
h->uid = smb_swap16(smbc->uid);
h->tid = smb_swap16(req->tid);
- pid = getpid();
+ pid = (unsigned int)getpid();
h->pid_high = smb_swap16((unsigned short)(pid >> 16));
h->pid = smb_swap16((unsigned short) pid);
}
@@ -1071,7 +1071,7 @@ static CURLcode smb_request_state(struct Curl_easy *data, bool *done)
break;
case SMB_CLOSE:
- /* We don't care if the close failed, proceed to tree disconnect anyway */
+ /* We do not care if the close failed, proceed to tree disconnect anyway */
next_state = SMB_TREE_DISCONNECT;
break;
diff --git a/libs/libcurl/src/smtp.c b/libs/libcurl/src/smtp.c
index 197697bfa4..02321ebf85 100644
--- a/libs/libcurl/src/smtp.c
+++ b/libs/libcurl/src/smtp.c
@@ -534,16 +534,16 @@ static CURLcode smtp_perform_command(struct Curl_easy *data)
if(smtp->rcpt) {
/* We notify the server we are sending UTF-8 data if a) it supports the
SMTPUTF8 extension and b) The mailbox contains UTF-8 characters, in
- either the local address or host name parts. This is regardless of
- whether the host name is encoded using IDN ACE */
+ either the local address or hostname parts. This is regardless of
+ whether the hostname is encoded using IDN ACE */
bool utf8 = FALSE;
if((!smtp->custom) || (!smtp->custom[0])) {
char *address = NULL;
struct hostname host = { NULL, NULL, NULL, NULL };
- /* Parse the mailbox to verify into the local address and host name
- parts, converting the host name to an IDN A-label if necessary */
+ /* Parse the mailbox to verify into the local address and hostname
+ parts, converting the hostname to an IDN A-label if necessary */
result = smtp_parse_address(smtp->rcpt->data,
&address, &host);
if(result)
@@ -555,7 +555,7 @@ static CURLcode smtp_perform_command(struct Curl_easy *data)
((host.encalloc) || (!Curl_is_ASCII_name(address)) ||
(!Curl_is_ASCII_name(host.name)));
- /* Send the VRFY command (Note: The host name part may be absent when the
+ /* Send the VRFY command (Note: The hostname part may be absent when the
host is a local system) */
result = Curl_pp_sendf(data, &conn->proto.smtpc.pp, "VRFY %s%s%s%s",
address,
@@ -607,8 +607,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
/* We notify the server we are sending UTF-8 data if a) it supports the
SMTPUTF8 extension and b) The mailbox contains UTF-8 characters, in
- either the local address or host name parts. This is regardless of
- whether the host name is encoded using IDN ACE */
+ either the local address or hostname parts. This is regardless of
+ whether the hostname is encoded using IDN ACE */
bool utf8 = FALSE;
/* Calculate the FROM parameter */
@@ -616,8 +616,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
char *address = NULL;
struct hostname host = { NULL, NULL, NULL, NULL };
- /* Parse the FROM mailbox into the local address and host name parts,
- converting the host name to an IDN A-label if necessary */
+ /* Parse the FROM mailbox into the local address and hostname parts,
+ converting the hostname to an IDN A-label if necessary */
result = smtp_parse_address(data->set.str[STRING_MAIL_FROM],
&address, &host);
if(result)
@@ -635,8 +635,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
Curl_free_idnconverted_hostname(&host);
}
else
- /* An invalid mailbox was provided but we'll simply let the server worry
- about that and reply with a 501 error */
+ /* An invalid mailbox was provided but we will simply let the server
+ worry about that and reply with a 501 error */
from = aprintf("<%s>", address);
free(address);
@@ -656,8 +656,8 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
char *address = NULL;
struct hostname host = { NULL, NULL, NULL, NULL };
- /* Parse the AUTH mailbox into the local address and host name parts,
- converting the host name to an IDN A-label if necessary */
+ /* Parse the AUTH mailbox into the local address and hostname parts,
+ converting the hostname to an IDN A-label if necessary */
result = smtp_parse_address(data->set.str[STRING_MAIL_AUTH],
&address, &host);
if(result)
@@ -676,7 +676,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
Curl_free_idnconverted_hostname(&host);
}
else
- /* An invalid mailbox was provided but we'll simply let the server
+ /* An invalid mailbox was provided but we will simply let the server
worry about it */
auth = aprintf("<%s>", address);
free(address);
@@ -695,7 +695,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
/* Prepare the mime data if some. */
if(data->set.mimepost.kind != MIMEKIND_NONE) {
/* Use the whole structure as data. */
- data->set.mimepost.flags &= ~MIME_BODY_ONLY;
+ data->set.mimepost.flags &= ~(unsigned int)MIME_BODY_ONLY;
/* Add external headers and mime version. */
curl_mime_headers(&data->set.mimepost, data->set.headers, 0);
@@ -731,7 +731,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
}
}
- /* If the mailboxes in the FROM and AUTH parameters don't include a UTF-8
+ /* If the mailboxes in the FROM and AUTH parameters do not include a UTF-8
based address then quickly scan through the recipient list and check if
any there do, as we need to correctly identify our support for SMTPUTF8
in the envelope, as per RFC-6531 sect. 3.4 */
@@ -740,7 +740,7 @@ static CURLcode smtp_perform_mail(struct Curl_easy *data)
struct curl_slist *rcpt = smtp->rcpt;
while(rcpt && !utf8) {
- /* Does the host name contain non-ASCII characters? */
+ /* Does the hostname contain non-ASCII characters? */
if(!Curl_is_ASCII_name(rcpt->data))
utf8 = TRUE;
@@ -790,8 +790,8 @@ static CURLcode smtp_perform_rcpt_to(struct Curl_easy *data)
char *address = NULL;
struct hostname host = { NULL, NULL, NULL, NULL };
- /* Parse the recipient mailbox into the local address and host name parts,
- converting the host name to an IDN A-label if necessary */
+ /* Parse the recipient mailbox into the local address and hostname parts,
+ converting the hostname to an IDN A-label if necessary */
result = smtp_parse_address(smtp->rcpt->data,
&address, &host);
if(result)
@@ -802,7 +802,7 @@ static CURLcode smtp_perform_rcpt_to(struct Curl_easy *data)
result = Curl_pp_sendf(data, &conn->proto.smtpc.pp, "RCPT TO:<%s@%s>",
address, host.name);
else
- /* An invalid mailbox was provided but we'll simply let the server worry
+ /* An invalid mailbox was provided but we will simply let the server worry
about that and reply with a 501 error */
result = Curl_pp_sendf(data, &conn->proto.smtpc.pp, "RCPT TO:<%s>",
address);
@@ -958,7 +958,7 @@ static CURLcode smtp_state_ehlo_resp(struct Curl_easy *data,
if(smtpcode != 1) {
if(data->set.use_ssl && !Curl_conn_is_ssl(conn, FIRSTSOCKET)) {
- /* We don't have a SSL/TLS connection yet, but SSL is requested */
+ /* We do not have a SSL/TLS connection yet, but SSL is requested */
if(smtpc->tls_supported)
/* Switch to TLS connection now */
result = smtp_perform_starttls(data, conn);
@@ -1102,7 +1102,7 @@ static CURLcode smtp_state_rcpt_resp(struct Curl_easy *data,
is_smtp_err = (smtpcode/100 != 2) ? TRUE : FALSE;
- /* If there's multiple RCPT TO to be issued, it's possible to ignore errors
+ /* If there are multiple RCPT TO to be issued, it is possible to ignore errors
and proceed with only the valid addresses. */
is_smtp_blocking_err =
(is_smtp_err && !data->set.mail_rcpt_allowfails) ? TRUE : FALSE;
@@ -1129,7 +1129,7 @@ static CURLcode smtp_state_rcpt_resp(struct Curl_easy *data,
/* Send the next RCPT TO command */
result = smtp_perform_rcpt_to(data);
else {
- /* We weren't able to issue a successful RCPT TO command while going
+ /* We were not able to issue a successful RCPT TO command while going
over recipients (potentially multiple). Sending back last error. */
if(!smtp->rcpt_had_ok) {
failf(data, "RCPT failed: %d (last error)", smtp->rcpt_last_error);
@@ -1164,7 +1164,7 @@ static CURLcode smtp_state_data_resp(struct Curl_easy *data, int smtpcode,
Curl_pgrsSetUploadSize(data, data->state.infilesize);
/* SMTP upload */
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* End of DO phase */
smtp_state(data, SMTP_STOP);
@@ -1202,6 +1202,7 @@ static CURLcode smtp_statemachine(struct Curl_easy *data,
size_t nread = 0;
/* Busy upgrading the connection; right now all I/O is SSL/TLS, not SMTP */
+upgrade_tls:
if(smtpc->state == SMTP_UPGRADETLS)
return smtp_perform_upgrade_tls(data);
@@ -1238,6 +1239,10 @@ static CURLcode smtp_statemachine(struct Curl_easy *data,
case SMTP_STARTTLS:
result = smtp_state_starttls_resp(data, smtpcode, smtpc->state);
+ /* During UPGRADETLS, leave the read loop as we need to connect
+ * (e.g. TLS handshake) before we continue sending/receiving. */
+ if(!result && (smtpc->state == SMTP_UPGRADETLS))
+ goto upgrade_tls;
break;
case SMTP_AUTH:
@@ -1447,10 +1452,10 @@ static CURLcode smtp_perform(struct Curl_easy *data, bool *connected,
/* Store the first recipient (or NULL if not specified) */
smtp->rcpt = data->set.mail_rcpt;
- /* Track of whether we've successfully sent at least one RCPT TO command */
+ /* Track of whether we have successfully sent at least one RCPT TO command */
smtp->rcpt_had_ok = FALSE;
- /* Track of the last error we've received by sending RCPT TO command */
+ /* Track of the last error we have received by sending RCPT TO command */
smtp->rcpt_last_error = 0;
/* Initial data character is the first character in line: it is implicitly
@@ -1550,7 +1555,7 @@ static CURLcode smtp_dophase_done(struct Curl_easy *data, bool connected)
if(smtp->transfer != PPTRANSFER_BODY)
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
return CURLE_OK;
}
@@ -1708,7 +1713,7 @@ static CURLcode smtp_parse_custom_request(struct Curl_easy *data)
* smtp_parse_address()
*
* Parse the fully qualified mailbox address into a local address part and the
- * host name, converting the host name to an IDN A-label, as per RFC-5890, if
+ * hostname, converting the hostname to an IDN A-label, as per RFC-5890, if
* necessary.
*
* Parameters:
@@ -1719,8 +1724,8 @@ static CURLcode smtp_parse_custom_request(struct Curl_easy *data)
* address [in/out] - A new allocated buffer which holds the local
* address part of the mailbox. This buffer must be
* free'ed by the caller.
- * host [in/out] - The host name structure that holds the original,
- * and optionally encoded, host name.
+ * host [in/out] - The hostname structure that holds the original,
+ * and optionally encoded, hostname.
* Curl_free_idnconverted_hostname() must be called
* once the caller has finished with the structure.
*
@@ -1728,14 +1733,14 @@ static CURLcode smtp_parse_custom_request(struct Curl_easy *data)
*
* Notes:
*
- * Should a UTF-8 host name require conversion to IDN ACE and we cannot honor
+ * Should a UTF-8 hostname require conversion to IDN ACE and we cannot honor
* that conversion then we shall return success. This allow the caller to send
* the data to the server as a U-label (as per RFC-6531 sect. 3.2).
*
* If an mailbox '@' separator cannot be located then the mailbox is considered
* to be either a local mailbox or an invalid mailbox (depending on what the
* calling function deems it to be) then the input will simply be returned in
- * the address part with the host name being NULL.
+ * the address part with the hostname being NULL.
*/
static CURLcode smtp_parse_address(const char *fqma, char **address,
struct hostname *host)
@@ -1744,7 +1749,7 @@ static CURLcode smtp_parse_address(const char *fqma, char **address,
size_t length;
/* Duplicate the fully qualified email address so we can manipulate it,
- ensuring it doesn't contain the delimiters if specified */
+ ensuring it does not contain the delimiters if specified */
char *dup = strdup(fqma[0] == '<' ? fqma + 1 : fqma);
if(!dup)
return CURLE_OUT_OF_MEMORY;
@@ -1755,17 +1760,17 @@ static CURLcode smtp_parse_address(const char *fqma, char **address,
dup[length - 1] = '\0';
}
- /* Extract the host name from the address (if we can) */
+ /* Extract the hostname from the address (if we can) */
host->name = strpbrk(dup, "@");
if(host->name) {
*host->name = '\0';
host->name = host->name + 1;
- /* Attempt to convert the host name to IDN ACE */
+ /* Attempt to convert the hostname to IDN ACE */
(void) Curl_idnconvert_hostname(host);
/* If Curl_idnconvert_hostname() fails then we shall attempt to continue
- and send the host name using UTF-8 rather than as 7-bit ACE (which is
+ and send the hostname using UTF-8 rather than as 7-bit ACE (which is
our preference) */
}
@@ -1925,6 +1930,7 @@ static const struct Curl_crtype cr_eob = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
+ Curl_creader_def_is_paused,
Curl_creader_def_done,
sizeof(struct cr_eob_ctx)
};
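smtp_parse_address(), whose documentation is reworded above, splits a fully qualified mailbox into a local part and a hostname before any IDN conversion, and leaves the hostname NULL when no '@' separator is found. The standalone sketch below shows only that splitting step in plain standard C; the split_mailbox() helper is made up for illustration and the IDN (A-label) handling is deliberately left out.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "<local@host>" (or "local@host") into a local part and a hostname.
   Returns the hostname pointer, or NULL when no '@' is present; the caller
   frees *local, which the returned hostname points into. */
static char *split_mailbox(const char *fqma, char **local)
{
  size_t len;
  char *host;
  char *dup;
  *local = NULL;
  dup = strdup(fqma[0] == '<' ? fqma + 1 : fqma);
  if(!dup)
    return NULL;
  len = strlen(dup);
  if(len && dup[len - 1] == '>')
    dup[len - 1] = '\0';          /* strip a trailing '>' if present */
  host = strchr(dup, '@');
  if(host) {
    *host = '\0';                 /* terminate the local part */
    host++;                       /* hostname starts right after the '@' */
  }
  *local = dup;
  return host;
}

int main(void)
{
  char *local = NULL;
  char *host = split_mailbox("<user@example.org>", &local);
  printf("local=%s host=%s\n", local ? local : "(none)",
         host ? host : "(none)");
  free(local);
  return 0;
}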
diff --git a/libs/libcurl/src/socketpair.c b/libs/libcurl/src/socketpair.c
index 06eaa2b5c7..22b067335a 100644
--- a/libs/libcurl/src/socketpair.c
+++ b/libs/libcurl/src/socketpair.c
@@ -27,14 +27,31 @@
#include "urldata.h"
#include "rand.h"
-#if defined(HAVE_PIPE) && defined(HAVE_FCNTL)
+#if defined(USE_EVENTFD)
+#ifdef HAVE_SYS_EVENTFD_H
+#include <sys/eventfd.h>
+#endif
+
+int Curl_eventfd(curl_socket_t socks[2], bool nonblocking)
+{
+ int efd = eventfd(0, nonblocking ? EFD_CLOEXEC | EFD_NONBLOCK : EFD_CLOEXEC);
+ if(efd == -1) {
+ socks[0] = socks[1] = CURL_SOCKET_BAD;
+ return -1;
+ }
+ socks[0] = socks[1] = efd;
+ return 0;
+}
+#elif defined(HAVE_PIPE)
+#ifdef HAVE_FCNTL
#include <fcntl.h>
+#endif
-int Curl_pipe(curl_socket_t socks[2])
+int Curl_pipe(curl_socket_t socks[2], bool nonblocking)
{
if(pipe(socks))
return -1;
-
+#ifdef HAVE_FCNTL
if(fcntl(socks[0], F_SETFD, FD_CLOEXEC) ||
fcntl(socks[1], F_SETFD, FD_CLOEXEC) ) {
close(socks[0]);
@@ -42,13 +59,45 @@ int Curl_pipe(curl_socket_t socks[2])
socks[0] = socks[1] = CURL_SOCKET_BAD;
return -1;
}
+#endif
+ if(nonblocking) {
+ if(curlx_nonblock(socks[0], TRUE) < 0 ||
+ curlx_nonblock(socks[1], TRUE) < 0) {
+ close(socks[0]);
+ close(socks[1]);
+ socks[0] = socks[1] = CURL_SOCKET_BAD;
+ return -1;
+ }
+ }
return 0;
}
#endif
-#if !defined(HAVE_SOCKETPAIR) && !defined(CURL_DISABLE_SOCKETPAIR)
+#ifndef CURL_DISABLE_SOCKETPAIR
+#ifdef HAVE_SOCKETPAIR
+int Curl_socketpair(int domain, int type, int protocol,
+ curl_socket_t socks[2], bool nonblocking)
+{
+#ifdef SOCK_NONBLOCK
+ type = nonblocking ? type | SOCK_NONBLOCK : type;
+#endif
+ if(socketpair(domain, type, protocol, socks))
+ return -1;
+#ifndef SOCK_NONBLOCK
+ if(nonblocking) {
+ if(curlx_nonblock(socks[0], TRUE) < 0 ||
+ curlx_nonblock(socks[1], TRUE) < 0) {
+ close(socks[0]);
+ close(socks[1]);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+#else /* !HAVE_SOCKETPAIR */
#ifdef _WIN32
/*
* This is a socketpair() implementation for Windows.
@@ -80,7 +129,7 @@ int Curl_pipe(curl_socket_t socks[2])
#include "memdebug.h"
int Curl_socketpair(int domain, int type, int protocol,
- curl_socket_t socks[2])
+ curl_socket_t socks[2], bool nonblocking)
{
union {
struct sockaddr_in inaddr;
@@ -106,7 +155,7 @@ int Curl_socketpair(int domain, int type, int protocol,
socks[0] = socks[1] = CURL_SOCKET_BAD;
#if defined(_WIN32) || defined(__CYGWIN__)
- /* don't set SO_REUSEADDR on Windows */
+ /* do not set SO_REUSEADDR on Windows */
(void)reuse;
#ifdef SO_EXCLUSIVEADDRUSE
{
@@ -134,7 +183,7 @@ int Curl_socketpair(int domain, int type, int protocol,
if(connect(socks[0], &a.addr, sizeof(a.inaddr)) == -1)
goto error;
- /* use non-blocking accept to make sure we don't block forever */
+ /* use non-blocking accept to make sure we do not block forever */
if(curlx_nonblock(listener, TRUE) < 0)
goto error;
pfd[0].fd = listener;
@@ -168,7 +217,7 @@ int Curl_socketpair(int domain, int type, int protocol,
nread = sread(socks[1], p, s);
if(nread == -1) {
int sockerr = SOCKERRNO;
- /* Don't block forever */
+ /* Do not block forever */
if(Curl_timediff(Curl_now(), start) > (60 * 1000))
goto error;
if(
@@ -198,6 +247,10 @@ int Curl_socketpair(int domain, int type, int protocol,
} while(1);
}
+ if(nonblocking)
+ if(curlx_nonblock(socks[0], TRUE) < 0 ||
+ curlx_nonblock(socks[1], TRUE) < 0)
+ goto error;
sclose(listener);
return 0;
@@ -207,5 +260,5 @@ error:
sclose(socks[1]);
return -1;
}
-
-#endif /* ! HAVE_SOCKETPAIR */
+#endif
+#endif /* !CURL_DISABLE_SOCKETPAIR */
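The reworked socketpair.c now creates the internal wakeup descriptors non-blocking in one step, whether they come from eventfd, pipe(2) or the emulated socketpair. A reduced POSIX-only sketch of the pipe variant follows, using a hypothetical wakeup_pipe() helper; libcurl's own code uses curlx_nonblock() and adds the Windows loopback emulation shown above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Create a close-on-exec pipe and optionally switch both ends to
   non-blocking mode, mirroring the pattern in the hunk above. */
static int wakeup_pipe(int fds[2], int nonblocking)
{
  int i;
  if(pipe(fds))
    return -1;
  for(i = 0; i < 2; i++) {
    if(fcntl(fds[i], F_SETFD, FD_CLOEXEC) ||
       (nonblocking &&
        fcntl(fds[i], F_SETFL, fcntl(fds[i], F_GETFL, 0) | O_NONBLOCK))) {
      close(fds[0]);
      close(fds[1]);
      fds[0] = fds[1] = -1;
      return -1;
    }
  }
  return 0;
}

int main(void)
{
  int fds[2];
  char buf[1];
  if(wakeup_pipe(fds, 1))
    return 1;
  (void)write(fds[1], "x", 1);             /* "wake up" the reading side */
  printf("drained %zd byte(s)\n", read(fds[0], buf, sizeof(buf)));
  close(fds[0]);
  close(fds[1]);
  return 0;
}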
diff --git a/libs/libcurl/src/socketpair.h b/libs/libcurl/src/socketpair.h
index b52b8b7d8a..38d8245bc4 100644
--- a/libs/libcurl/src/socketpair.h
+++ b/libs/libcurl/src/socketpair.h
@@ -26,21 +26,44 @@
#include "curl_setup.h"
-#ifdef HAVE_PIPE
+#if defined(HAVE_EVENTFD) && \
+  (defined(__x86_64__) || \
+   defined(__aarch64__) || \
+   defined(__ia64__) || \
+   defined(__ppc64__) || \
+   defined(__mips64) || \
+   defined(__sparc64__) || \
+   defined(__riscv_64e) || \
+   defined(__s390x__))
+
+/* Use eventfd only with 64-bit CPU architectures because eventfd has a
+ * stringent rule of requiring the 8-byte buffer when calling read(2) and
+ * write(2) on it. In some rare cases, the C standard library implementation
+ * on a 32-bit system might choose to define uint64_t as a 32-bit type for
+ * various reasons (memory limitations, compatibility with older code),
+ * which makes eventfd broken.
+ */
+#define USE_EVENTFD 1
#define wakeup_write write
#define wakeup_read read
#define wakeup_close close
-#define wakeup_create(p) Curl_pipe(p)
+#define wakeup_create(p,nb) Curl_eventfd(p,nb)
-#ifdef HAVE_FCNTL
#include <curl/curl.h>
-int Curl_pipe(curl_socket_t socks[2]);
-#else
-#define Curl_pipe(p) pipe(p)
-#endif
+int Curl_eventfd(curl_socket_t socks[2], bool nonblocking);
+
+#elif defined(HAVE_PIPE)
+
+#define wakeup_write write
+#define wakeup_read read
+#define wakeup_close close
+#define wakeup_create(p,nb) Curl_pipe(p,nb)
+
+#include <curl/curl.h>
+int Curl_pipe(curl_socket_t socks[2], bool nonblocking);
-#else /* HAVE_PIPE */
+#else /* !USE_EVENTFD && !HAVE_PIPE */
#define wakeup_write swrite
#define wakeup_read sread
@@ -60,19 +83,16 @@ int Curl_pipe(curl_socket_t socks[2]);
#define SOCKETPAIR_TYPE SOCK_STREAM
#endif
-#define wakeup_create(p)\
-Curl_socketpair(SOCKETPAIR_FAMILY, SOCKETPAIR_TYPE, 0, p)
-
-#endif /* HAVE_PIPE */
+#define wakeup_create(p,nb)\
+Curl_socketpair(SOCKETPAIR_FAMILY, SOCKETPAIR_TYPE, 0, p, nb)
+#endif /* USE_EVENTFD */
-#ifndef HAVE_SOCKETPAIR
+#ifndef CURL_DISABLE_SOCKETPAIR
#include <curl/curl.h>
int Curl_socketpair(int domain, int type, int protocol,
- curl_socket_t socks[2]);
-#else
-#define Curl_socketpair(a,b,c,d) socketpair(a,b,c,d)
+ curl_socket_t socks[2], bool nonblocking);
#endif
#endif /* HEADER_CURL_SOCKETPAIR_H */
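socketpair.h now prefers eventfd for the wakeup_* channel and, as the comment above explains, restricts it to 64-bit targets because every read(2) and write(2) on an eventfd must transfer exactly an 8-byte counter. A minimal Linux-only sketch of that convention follows, with error handling trimmed to the essentials.

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
  uint64_t inc = 1, value = 0;
  int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if(efd == -1)
    return 1;

  /* signal the waiter: adds 'inc' to the kernel-side counter */
  if(write(efd, &inc, sizeof(inc)) != (ssize_t)sizeof(inc))
    perror("write");

  /* drain the counter; with EFD_NONBLOCK this fails with EAGAIN when zero */
  if(read(efd, &value, sizeof(value)) == (ssize_t)sizeof(value))
    printf("counter was %llu\n", (unsigned long long)value);

  close(efd);
  return 0;
}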
diff --git a/libs/libcurl/src/socks.c b/libs/libcurl/src/socks.c
index 016e9db041..a21e6d51fb 100644
--- a/libs/libcurl/src/socks.c
+++ b/libs/libcurl/src/socks.c
@@ -125,7 +125,7 @@ int Curl_blockread_all(struct Curl_cfilter *cf,
}
nread = Curl_conn_cf_recv(cf->next, data, buf, buffersize, &err);
if(nread <= 0) {
- result = err;
+ result = (int)err;
if(CURLE_AGAIN == err)
continue;
if(err) {
@@ -194,7 +194,7 @@ static void socksstate(struct socks_state *sx, struct Curl_easy *data,
(void)data;
if(oldstate == state)
- /* don't bother when the new state is the same as the old state */
+ /* do not bother when the new state is the same as the old state */
return;
sx->state = state;
@@ -335,7 +335,7 @@ static CURLproxycode do_SOCKS4(struct Curl_cfilter *cf,
goto CONNECT_RESOLVED;
}
- /* socks4a doesn't resolve anything locally */
+ /* socks4a does not resolve anything locally */
sxstate(sx, data, CONNECT_REQ_INIT);
goto CONNECT_REQ_INIT;
@@ -365,7 +365,7 @@ CONNECT_RESOLVED:
{
struct Curl_addrinfo *hp = NULL;
/*
- * We cannot use 'hostent' as a struct that Curl_resolv() returns. It
+ * We cannot use 'hostent' as a struct that Curl_resolv() returns. It
* returns a Curl_addrinfo pointer that may not always look the same.
*/
if(dns) {
@@ -413,7 +413,7 @@ CONNECT_REQ_INIT:
/* there is no real size limit to this field in the protocol, but
SOCKS5 limits the proxy user field to 255 bytes and it seems likely
that a longer field is either a mistake or malicious input */
- failf(data, "Too long SOCKS proxy user name");
+ failf(data, "Too long SOCKS proxy username");
return CURLPX_LONG_USER;
}
/* copy the proxy name WITH trailing zero */
@@ -440,7 +440,7 @@ CONNECT_REQ_INIT:
(packetsize + hostnamelen < sizeof(sx->buffer)))
strcpy((char *)socksreq + packetsize, sx->hostname);
else {
- failf(data, "SOCKS4: too long host name");
+ failf(data, "SOCKS4: too long hostname");
return CURLPX_LONG_HOSTNAME;
}
packetsize += hostnamelen;
@@ -516,7 +516,7 @@ CONNECT_REQ_INIT:
break;
case 91:
failf(data,
- "Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
+ "cannot complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected or failed.",
socksreq[4], socksreq[5], socksreq[6], socksreq[7],
(((unsigned char)socksreq[2] << 8) | (unsigned char)socksreq[3]),
@@ -524,7 +524,7 @@ CONNECT_REQ_INIT:
return CURLPX_REQUEST_FAILED;
case 92:
failf(data,
- "Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
+ "cannot complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected because SOCKS server cannot connect to "
"identd on the client.",
socksreq[4], socksreq[5], socksreq[6], socksreq[7],
@@ -533,7 +533,7 @@ CONNECT_REQ_INIT:
return CURLPX_IDENTD;
case 93:
failf(data,
- "Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
+ "cannot complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected because the client program and identd "
"report different user-ids.",
socksreq[4], socksreq[5], socksreq[6], socksreq[7],
@@ -542,7 +542,7 @@ CONNECT_REQ_INIT:
return CURLPX_IDENTD_DIFFER;
default:
failf(data,
- "Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
+ "cannot complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", Unknown.",
socksreq[4], socksreq[5], socksreq[6], socksreq[7],
(((unsigned char)socksreq[2] << 8) | (unsigned char)socksreq[3]),
@@ -562,7 +562,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf,
struct Curl_easy *data)
{
/*
- According to the RFC1928, section "6. Replies". This is what a SOCK5
+ According to the RFC1928, section "6. Replies". This is what a SOCK5
replies:
+----+-----+-------+------+----------+----------+
@@ -714,7 +714,7 @@ CONNECT_SOCKS_READ_INIT:
CONNECT_AUTH_INIT:
case CONNECT_AUTH_INIT: {
- /* Needs user name and password */
+ /* Needs username and password */
size_t proxy_user_len, proxy_password_len;
if(sx->proxy_user && sx->proxy_password) {
proxy_user_len = strlen(sx->proxy_user);
@@ -738,7 +738,7 @@ CONNECT_AUTH_INIT:
if(sx->proxy_user && proxy_user_len) {
/* the length must fit in a single byte */
if(proxy_user_len > 255) {
- failf(data, "Excessive user name length for proxy auth");
+ failf(data, "Excessive username length for proxy auth");
return CURLPX_LONG_USER;
}
memcpy(socksreq + len, sx->proxy_user, proxy_user_len);
@@ -990,7 +990,7 @@ CONNECT_REQ_SEND:
else if(socksreq[1]) { /* Anything besides 0 is an error */
CURLproxycode rc = CURLPX_REPLY_UNASSIGNED;
int code = socksreq[1];
- failf(data, "Can't complete SOCKS5 connection to %s. (%d)",
+ failf(data, "cannot complete SOCKS5 connection to %s. (%d)",
sx->hostname, (unsigned char)socksreq[1]);
if(code < 9) {
/* RFC 1928 section 6 lists: */
@@ -1120,7 +1120,7 @@ static void socks_proxy_cf_free(struct Curl_cfilter *cf)
}
/* After a TCP connection to the proxy has been verified, this function does
- the next magic steps. If 'done' isn't set TRUE, it is not done yet and
+ the next magic steps. If 'done' is not set TRUE, it is not done yet and
must be called again.
Note: this function's sub-functions call failf()
@@ -1249,6 +1249,7 @@ struct Curl_cftype Curl_cft_socks_proxy = {
socks_proxy_cf_destroy,
socks_proxy_cf_connect,
socks_proxy_cf_close,
+ Curl_cf_def_shutdown,
socks_cf_get_host,
socks_cf_adjust_pollset,
Curl_cf_def_data_pending,
diff --git a/libs/libcurl/src/socks_gssapi.c b/libs/libcurl/src/socks_gssapi.c
index a592caad5b..c302ddf5bb 100644
--- a/libs/libcurl/src/socks_gssapi.c
+++ b/libs/libcurl/src/socks_gssapi.c
@@ -172,7 +172,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
(void)curlx_nonblock(sock, FALSE);
- /* As long as we need to keep sending some context info, and there's no */
+ /* As long as we need to keep sending some context info, and there is no */
/* errors, keep sending it... */
for(;;) {
gss_major_status = Curl_gss_init_sec_context(data,
@@ -201,7 +201,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
if(gss_send_token.length) {
socksreq[0] = 1; /* GSS-API subnegotiation version */
socksreq[1] = 1; /* authentication message type */
- us_length = htons((short)gss_send_token.length);
+ us_length = htons((unsigned short)gss_send_token.length);
memcpy(socksreq + 2, &us_length, sizeof(short));
nwritten = Curl_conn_cf_send(cf->next, data, (char *)socksreq, 4, &code);
@@ -306,7 +306,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
gss_minor_status, "gss_inquire_context")) {
gss_delete_sec_context(&gss_status, &gss_context, NULL);
gss_release_name(&gss_status, &gss_client_name);
- failf(data, "Failed to determine user name.");
+ failf(data, "Failed to determine username.");
return CURLE_COULDNT_CONNECT;
}
gss_major_status = gss_display_name(&gss_minor_status, gss_client_name,
@@ -316,7 +316,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
gss_delete_sec_context(&gss_status, &gss_context, NULL);
gss_release_name(&gss_status, &gss_client_name);
gss_release_buffer(&gss_status, &gss_send_token);
- failf(data, "Failed to determine user name.");
+ failf(data, "Failed to determine username.");
return CURLE_COULDNT_CONNECT;
}
user = malloc(gss_send_token.length + 1);
@@ -377,7 +377,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
*
* The token is produced by encapsulating an octet containing the
* required protection level using gss_seal()/gss_wrap() with conf_req
- * set to FALSE. The token is verified using gss_unseal()/
+ * set to FALSE. The token is verified using gss_unseal()/
* gss_unwrap().
*
*/
@@ -406,7 +406,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
}
gss_release_buffer(&gss_status, &gss_send_token);
- us_length = htons((short)gss_w_token.length);
+ us_length = htons((unsigned short)gss_w_token.length);
memcpy(socksreq + 2, &us_length, sizeof(short));
}
diff --git a/libs/libcurl/src/socks_sspi.c b/libs/libcurl/src/socks_sspi.c
index dc14e7e3e7..882c8a2a45 100644
--- a/libs/libcurl/src/socks_sspi.c
+++ b/libs/libcurl/src/socks_sspi.c
@@ -158,7 +158,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
(void)curlx_nonblock(sock, FALSE);
- /* As long as we need to keep sending some context info, and there's no */
+ /* As long as we need to keep sending some context info, and there is no */
/* errors, keep sending it... */
for(;;) {
TCHAR *sname;
@@ -204,7 +204,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
if(sspi_send_token.cbBuffer) {
socksreq[0] = 1; /* GSS-API subnegotiation version */
socksreq[1] = 1; /* authentication message type */
- us_length = htons((short)sspi_send_token.cbBuffer);
+ us_length = htons((unsigned short)sspi_send_token.cbBuffer);
memcpy(socksreq + 2, &us_length, sizeof(short));
written = Curl_conn_cf_send(cf->next, data, (char *)socksreq, 4, &code);
@@ -328,7 +328,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
if(check_sspi_err(data, status, "QueryCredentialAttributes")) {
s_pSecFn->DeleteSecurityContext(&sspi_context);
s_pSecFn->FreeContextBuffer(names.sUserName);
- failf(data, "Failed to determine user name.");
+ failf(data, "Failed to determine username.");
return CURLE_COULDNT_CONNECT;
}
else {
@@ -383,13 +383,13 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
*
* The token is produced by encapsulating an octet containing the
* required protection level using gss_seal()/gss_wrap() with conf_req
- * set to FALSE. The token is verified using gss_unseal()/
+ * set to FALSE. The token is verified using gss_unseal()/
* gss_unwrap().
*
*/
if(data->set.socks5_gssapi_nec) {
- us_length = htons((short)1);
+ us_length = htons((unsigned short)1);
memcpy(socksreq + 2, &us_length, sizeof(short));
}
else {
@@ -472,7 +472,7 @@ CURLcode Curl_SOCKS5_gssapi_negotiate(struct Curl_cfilter *cf,
sspi_w_token[2].pvBuffer = NULL;
sspi_w_token[2].cbBuffer = 0;
- us_length = htons((short)sspi_send_token.cbBuffer);
+ us_length = htons((unsigned short)sspi_send_token.cbBuffer);
memcpy(socksreq + 2, &us_length, sizeof(short));
}
diff --git a/libs/libcurl/src/splay.c b/libs/libcurl/src/splay.c
index 358368723a..20053c6770 100644
--- a/libs/libcurl/src/splay.c
+++ b/libs/libcurl/src/splay.c
@@ -93,7 +93,7 @@ struct Curl_tree *Curl_splay(struct curltime i,
return t;
}
-/* Insert key i into the tree t. Return a pointer to the resulting tree or
+/* Insert key i into the tree t. Return a pointer to the resulting tree or
* NULL if something went wrong.
*
* @unittest: 1309
@@ -150,7 +150,7 @@ struct Curl_tree *Curl_splayinsert(struct curltime i,
}
/* Finds and deletes the best-fit node from the tree. Return a pointer to the
- resulting tree. best-fit means the smallest node if it is not larger than
+ resulting tree. best-fit means the smallest node if it is not larger than
the key */
struct Curl_tree *Curl_splaygetbest(struct curltime i,
struct Curl_tree *t,
@@ -197,13 +197,13 @@ struct Curl_tree *Curl_splaygetbest(struct curltime i,
}
-/* Deletes the very node we point out from the tree if it's there. Stores a
+/* Deletes the very node we point out from the tree if it is there. Stores a
* pointer to the new resulting tree in 'newroot'.
*
* Returns zero on success and non-zero on errors!
* When returning error, it does not touch the 'newroot' pointer.
*
- * NOTE: when the last node of the tree is removed, there's no tree left so
+ * NOTE: when the last node of the tree is removed, there is no tree left so
* 'newroot' will be made to point to NULL.
*
* @unittest: 1309
@@ -241,7 +241,7 @@ int Curl_splayremove(struct Curl_tree *t,
/* First make sure that we got the same root node as the one we want
to remove, as otherwise we might be trying to remove a node that
- isn't actually in the tree.
+ is not actually in the tree.
We cannot just compare the keys here as a double remove in quick
succession of a node with key != KEY_NOTUSED && same != NULL
@@ -249,7 +249,7 @@ int Curl_splayremove(struct Curl_tree *t,
if(t != removenode)
return 2;
- /* Check if there is a list with identical sizes, as then we're trying to
+ /* Check if there is a list with identical sizes, as then we are trying to
remove the root node of a list of nodes with identical keys. */
x = t->samen;
if(x != t) {
diff --git a/libs/libcurl/src/splay.h b/libs/libcurl/src/splay.h
index 9783b56654..5ec7b7993d 100644
--- a/libs/libcurl/src/splay.h
+++ b/libs/libcurl/src/splay.h
@@ -32,7 +32,7 @@ struct Curl_tree {
struct Curl_tree *samen; /* points to the next node with identical key */
struct Curl_tree *samep; /* points to the prev node with identical key */
struct curltime key; /* this node's "sort" key */
- void *payload; /* data the splay code doesn't care about */
+ void *payload; /* data the splay code does not care about */
};
struct Curl_tree *Curl_splay(struct curltime i,
diff --git a/libs/libcurl/src/strcase.c b/libs/libcurl/src/strcase.c
index 0a658d0332..86e81295de 100644
--- a/libs/libcurl/src/strcase.c
+++ b/libs/libcurl/src/strcase.c
@@ -93,12 +93,12 @@ static int casecompare(const char *first, const char *second)
{
while(*first && *second) {
if(Curl_raw_toupper(*first) != Curl_raw_toupper(*second))
- /* get out of the loop as soon as they don't match */
+ /* get out of the loop as soon as they do not match */
return 0;
first++;
second++;
}
- /* If we're here either the strings are the same or the length is different.
+ /* If we are here either the strings are the same or the length is different.
We can just test if the "current" character is non-zero for one and zero
for the other. Note that the characters may not be exactly the same even
if they match, we only want to compare zero-ness. */
@@ -141,8 +141,8 @@ int curl_strnequal(const char *first, const char *second, size_t max)
/* if both pointers are NULL then treat them as equal if max is non-zero */
return (NULL == first && NULL == second && max);
}
-/* Copy an upper case version of the string from src to dest. The
- * strings may overlap. No more than n characters of the string are copied
+/* Copy an upper case version of the string from src to dest. The
+ * strings may overlap. No more than n characters of the string are copied
* (including any NUL) and the destination string will NOT be
* NUL-terminated if that limit is reached.
*/
@@ -156,8 +156,8 @@ void Curl_strntoupper(char *dest, const char *src, size_t n)
} while(*src++ && --n);
}
-/* Copy a lower case version of the string from src to dest. The
- * strings may overlap. No more than n characters of the string are copied
+/* Copy a lower case version of the string from src to dest. The
+ * strings may overlap. No more than n characters of the string are copied
* (including any NUL) and the destination string will NOT be
* NUL-terminated if that limit is reached.
*/
diff --git a/libs/libcurl/src/strerror.c b/libs/libcurl/src/strerror.c
index 95986724e0..2d8fe679f8 100644
--- a/libs/libcurl/src/strerror.c
+++ b/libs/libcurl/src/strerror.c
@@ -74,13 +74,13 @@ curl_easy_strerror(CURLcode error)
" this libcurl due to a build-time decision.";
case CURLE_COULDNT_RESOLVE_PROXY:
- return "Couldn't resolve proxy name";
+ return "Could not resolve proxy name";
case CURLE_COULDNT_RESOLVE_HOST:
- return "Couldn't resolve host name";
+ return "Could not resolve hostname";
case CURLE_COULDNT_CONNECT:
- return "Couldn't connect to server";
+ return "Could not connect to server";
case CURLE_WEIRD_SERVER_REPLY:
return "Weird server reply";
@@ -107,19 +107,19 @@ curl_easy_strerror(CURLcode error)
return "FTP: unknown 227 response format";
case CURLE_FTP_CANT_GET_HOST:
- return "FTP: can't figure out the host in the PASV response";
+ return "FTP: cannot figure out the host in the PASV response";
case CURLE_HTTP2:
return "Error in the HTTP2 framing layer";
case CURLE_FTP_COULDNT_SET_TYPE:
- return "FTP: couldn't set file type";
+ return "FTP: could not set file type";
case CURLE_PARTIAL_FILE:
return "Transferred a partial file";
case CURLE_FTP_COULDNT_RETR_FILE:
- return "FTP: couldn't retrieve (RETR failed) the specified file";
+ return "FTP: could not retrieve (RETR failed) the specified file";
case CURLE_QUOTE_ERROR:
return "Quote command returned error";
@@ -158,10 +158,10 @@ curl_easy_strerror(CURLcode error)
return "SSL connect error";
case CURLE_BAD_DOWNLOAD_RESUME:
- return "Couldn't resume download";
+ return "Could not resume download";
case CURLE_FILE_COULDNT_READ_FILE:
- return "Couldn't read a file:// file";
+ return "Could not read a file:// file";
case CURLE_LDAP_CANNOT_BIND:
return "LDAP: cannot bind";
@@ -212,7 +212,7 @@ curl_easy_strerror(CURLcode error)
return "Problem with the local SSL certificate";
case CURLE_SSL_CIPHER:
- return "Couldn't use specified SSL cipher";
+ return "Could not use specified SSL cipher";
case CURLE_PEER_FAILED_VERIFICATION:
return "SSL peer certificate or SSH remote key was not OK";
@@ -345,16 +345,15 @@ curl_easy_strerror(CURLcode error)
/*
* By using a switch, gcc -Wall will complain about enum values
* which do not appear, helping keep this function up-to-date.
- * By using gcc -Wall -Werror, you can't forget.
+ * By using gcc -Wall -Werror, you cannot forget.
*
- * A table would not have the same benefit. Most compilers will
- * generate code very similar to a table in any case, so there
- * is little performance gain from a table. And something is broken
- * for the user's application, anyways, so does it matter how fast
- * it _doesn't_ work?
+ * A table would not have the same benefit. Most compilers will generate
+ * code very similar to a table in any case, so there is little performance
+ * gain from a table. Something is broken for the user's application,
+ * anyways, so does it matter how fast it _does not_ work?
*
- * The line number for the error will be near this comment, which
- * is why it is here, and not at the start of the switch.
+ * The line number for the error will be near this comment, which is why it
+ * is here, and not at the start of the switch.
*/
return "Unknown error";
#else
@@ -795,7 +794,7 @@ get_winapi_error(int err, char *buf, size_t buflen)
expect the local codepage (eg fprintf, failf, infof).
FormatMessageW -> wcstombs is used for Windows CE compatibility. */
if(FormatMessageW((FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS), NULL, err,
+ FORMAT_MESSAGE_IGNORE_INSERTS), NULL, (DWORD)err,
LANG_NEUTRAL, wbuf, sizeof(wbuf)/sizeof(wchar_t), NULL)) {
size_t written = wcstombs(buf, wbuf, buflen - 1);
if(written != (size_t)-1)
@@ -823,9 +822,9 @@ get_winapi_error(int err, char *buf, size_t buflen)
* The 'err' argument passed in to this function MUST be a true errno number
* as reported on this system. We do no range checking on the number before
* we pass it to the "number-to-message" conversion function and there might
- * be systems that don't do proper range checking in there themselves.
+ * be systems that do not do proper range checking in there themselves.
*
- * We don't do range checking (on systems other than Windows) since there is
+ * We do not do range checking (on systems other than Windows) since there is
* no good reliable and portable way to do it.
*
* On Windows different types of error codes overlap. This function has an
@@ -865,7 +864,7 @@ const char *Curl_strerror(int err, char *buf, size_t buflen)
#ifdef USE_WINSOCK
!get_winsock_error(err, buf, buflen) &&
#endif
- !get_winapi_error((DWORD)err, buf, buflen))
+ !get_winapi_error(err, buf, buflen))
msnprintf(buf, buflen, "Unknown error %d (%#x)", err, err);
}
#else /* not Windows coming up */
@@ -944,7 +943,7 @@ const char *Curl_winapi_strerror(DWORD err, char *buf, size_t buflen)
*buf = '\0';
#ifndef CURL_DISABLE_VERBOSE_STRINGS
- if(!get_winapi_error(err, buf, buflen)) {
+ if(!get_winapi_error((int)err, buf, buflen)) {
msnprintf(buf, buflen, "Unknown error %lu (0x%08lX)", err, err);
}
#else
diff --git a/libs/libcurl/src/strtok.c b/libs/libcurl/src/strtok.c
index a51771a032..ffd546170c 100644
--- a/libs/libcurl/src/strtok.c
+++ b/libs/libcurl/src/strtok.c
@@ -65,4 +65,4 @@ Curl_strtok_r(char *ptr, const char *sep, char **end)
return NULL;
}
-#endif /* this was only compiled if strtok_r wasn't present */
+#endif /* this was only compiled if strtok_r was not present */
diff --git a/libs/libcurl/src/strtoofft.c b/libs/libcurl/src/strtoofft.c
index fd5864f372..48332495e9 100644
--- a/libs/libcurl/src/strtoofft.c
+++ b/libs/libcurl/src/strtoofft.c
@@ -31,7 +31,7 @@
* NOTE:
*
* In the ISO C standard (IEEE Std 1003.1), there is a strtoimax() function we
- * could use in case strtoll() doesn't exist... See
+ * could use in case strtoll() does not exist... See
* https://www.opengroup.org/onlinepubs/009695399/functions/strtoimax.html
*/
@@ -73,7 +73,7 @@ static const char valchars[] =
static int get_char(char c, int base);
/**
- * Custom version of the strtooff function. This extracts a curl_off_t
+ * Custom version of the strtooff function. This extracts a curl_off_t
* value from the given input string and returns it.
*/
static curl_off_t strtooff(const char *nptr, char **endptr, int base)
@@ -120,8 +120,8 @@ static curl_off_t strtooff(const char *nptr, char **endptr, int base)
}
}
- /* Matching strtol, if the base is 0 and it doesn't look like
- * the number is octal or hex, we assume it's base 10.
+ /* Matching strtol, if the base is 0 and it does not look like
+ * the number is octal or hex, we assume it is base 10.
*/
if(base == 0) {
base = 10;
@@ -168,7 +168,7 @@ static curl_off_t strtooff(const char *nptr, char **endptr, int base)
* @param c the character to interpret according to base
* @param base the base in which to interpret c
*
- * @return the value of c in base, or -1 if c isn't in range
+ * @return the value of c in base, or -1 if c is not in range
*/
static int get_char(char c, int base)
{
@@ -204,10 +204,10 @@ static int get_char(char c, int base)
return value;
}
-#endif /* Only present if we need strtoll, but don't have it. */
+#endif /* Only present if we need strtoll, but do not have it. */
/*
- * Parse a *positive* up to 64 bit number written in ascii.
+ * Parse a *positive* up to 64-bit number written in ascii.
*/
CURLofft curlx_strtoofft(const char *str, char **endp, int base,
curl_off_t *num)
@@ -222,7 +222,7 @@ CURLofft curlx_strtoofft(const char *str, char **endp, int base,
str++;
if(('-' == *str) || (ISSPACE(*str))) {
if(endp)
- *endp = (char *)str; /* didn't actually move */
+ *endp = (char *)str; /* did not actually move */
return CURL_OFFT_INVAL; /* nothing parsed */
}
number = strtooff(str, &end, base);
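Note: a minimal usage sketch of the curlx_strtoofft() entry point shown above. The literal input and the variable names are invented for illustration, and CURL_OFFT_OK is assumed to be the success member of the CURLofft enum that sits next to the CURL_OFFT_INVAL value used above:

    curl_off_t value;
    char *end;
    if(curlx_strtoofft("8190", &end, 10, &value) == CURL_OFFT_OK) {
      /* value is 8190 and end points at the byte after the last digit */
    }
    else {
      /* CURL_OFFT_INVAL: nothing parsed (leading '-' or whitespace, as the
         code above shows); the remaining enum value signals overflow of
         curl_off_t */
    }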
diff --git a/libs/libcurl/src/strtoofft.h b/libs/libcurl/src/strtoofft.h
index 93552c9630..e6ef31a870 100644
--- a/libs/libcurl/src/strtoofft.h
+++ b/libs/libcurl/src/strtoofft.h
@@ -30,7 +30,7 @@
* Determine which string to integral data type conversion function we use
* to implement string conversion to our curl_off_t integral data type.
*
- * Notice that curl_off_t might be 64 or 32 bit wide, and that it might use
+ * Notice that curl_off_t might be 64 or 32 bits wide, and that it might use
* an underlying data type which might be 'long', 'int64_t', 'long long' or
* '__int64' and more remotely other data types.
*
diff --git a/libs/libcurl/src/system_win32.c b/libs/libcurl/src/system_win32.c
index fc20e09f31..53ddcf345d 100644
--- a/libs/libcurl/src/system_win32.c
+++ b/libs/libcurl/src/system_win32.c
@@ -68,7 +68,7 @@ CURLcode Curl_win32_init(long flags)
res = WSAStartup(wVersionRequested, &wsaData);
if(res)
- /* Tell the user that we couldn't find a usable */
+ /* Tell the user that we could not find a usable */
/* winsock.dll. */
return CURLE_FAILED_INIT;
@@ -80,7 +80,7 @@ CURLcode Curl_win32_init(long flags)
if(LOBYTE(wsaData.wVersion) != LOBYTE(wVersionRequested) ||
HIBYTE(wsaData.wVersion) != HIBYTE(wVersionRequested) ) {
- /* Tell the user that we couldn't find a usable */
+ /* Tell the user that we could not find a usable */
/* winsock.dll. */
WSACleanup();
@@ -112,7 +112,11 @@ CURLcode Curl_win32_init(long flags)
}
#ifdef USE_WINSOCK
+#ifdef CURL_WINDOWS_APP
+ ws2_32Dll = Curl_load_library(TEXT("ws2_32.dll"));
+#else
ws2_32Dll = GetModuleHandleA("ws2_32");
+#endif
if(ws2_32Dll) {
Curl_FreeAddrInfoExW = CURLX_FUNCTION_CAST(FREEADDRINFOEXW_FN,
GetProcAddress(ws2_32Dll, "FreeAddrInfoExW"));
@@ -208,7 +212,7 @@ HMODULE Curl_load_library(LPCTSTR filename)
HMODULE hModule = NULL;
LOADLIBRARYEX_FN pLoadLibraryEx = NULL;
- /* Get a handle to kernel32 so we can access it's functions at runtime */
+ /* Get a handle to kernel32 so we can access its functions at runtime */
HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32"));
if(!hKernel32)
return NULL;
@@ -219,7 +223,7 @@ HMODULE Curl_load_library(LPCTSTR filename)
CURLX_FUNCTION_CAST(LOADLIBRARYEX_FN,
(GetProcAddress(hKernel32, LOADLIBARYEX)));
- /* Detect if there's already a path in the filename and load the library if
+ /* Detect if there is already a path in the filename and load the library if
there is. Note: Both back slashes and forward slashes have been supported
since the earlier days of DOS at an API level although they are not
supported by command prompt */
@@ -261,10 +265,22 @@ HMODULE Curl_load_library(LPCTSTR filename)
}
return hModule;
#else
- /* the Universal Windows Platform (UWP) can't do this */
+ /* the Universal Windows Platform (UWP) cannot do this */
(void)filename;
return NULL;
#endif
}
+bool Curl_win32_impersonating(void)
+{
+#ifndef CURL_WINDOWS_APP
+ HANDLE token = NULL;
+ if(OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE, &token)) {
+ CloseHandle(token);
+ return TRUE;
+ }
+#endif
+ return FALSE;
+}
+
#endif /* _WIN32 */
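Note: the new Curl_win32_impersonating() above only reports whether OpenThreadToken() with TOKEN_QUERY succeeds for the current thread, i.e. whether the thread is currently running under an impersonation token (always FALSE in CURL_WINDOWS_APP builds). A minimal sketch of a hypothetical caller, not taken from this patch:

    /* hypothetical helper, not part of this patch: per-user lookups such as
       profile or environment queries can be misleading while the thread
       impersonates another account */
    static bool per_user_lookup_is_safe(void)
    {
    #ifdef _WIN32
      return !Curl_win32_impersonating();
    #else
      return TRUE;
    #endif
    }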
diff --git a/libs/libcurl/src/system_win32.h b/libs/libcurl/src/system_win32.h
index aaae969f74..ec66d3b887 100644
--- a/libs/libcurl/src/system_win32.h
+++ b/libs/libcurl/src/system_win32.h
@@ -28,6 +28,8 @@
#ifdef _WIN32
+#include <curl/curl.h>
+
extern LARGE_INTEGER Curl_freq;
extern bool Curl_isVistaOrGreater;
extern bool Curl_isWindows8OrGreater;
@@ -68,6 +70,8 @@ extern FREEADDRINFOEXW_FN Curl_FreeAddrInfoExW;
extern GETADDRINFOEXCANCEL_FN Curl_GetAddrInfoExCancel;
extern GETADDRINFOEXW_FN Curl_GetAddrInfoExW;
+bool Curl_win32_impersonating(void);
+
/* This is used to dynamically load DLLs */
HMODULE Curl_load_library(LPCTSTR filename);
#else /* _WIN32 */
diff --git a/libs/libcurl/src/telnet.c b/libs/libcurl/src/telnet.c
index c60398f627..bced497ad2 100644
--- a/libs/libcurl/src/telnet.c
+++ b/libs/libcurl/src/telnet.c
@@ -798,12 +798,12 @@ static CURLcode check_telnet_options(struct Curl_easy *data)
struct TELNET *tn = data->req.p.telnet;
CURLcode result = CURLE_OK;
- /* Add the user name as an environment variable if it
+ /* Add the username as an environment variable if it
was given on the command line */
if(data->state.aptr.user) {
char buffer[256];
if(str_is_nonascii(data->conn->user)) {
- DEBUGF(infof(data, "set a non ASCII user name in telnet"));
+ DEBUGF(infof(data, "set a non ASCII username in telnet"));
return CURLE_BAD_FUNCTION_ARGUMENT;
}
msnprintf(buffer, sizeof(buffer), "USER,%s", data->conn->user);
@@ -1191,12 +1191,12 @@ process_iac:
if(c != CURL_SE) {
if(c != CURL_IAC) {
/*
- * This is an error. We only expect to get "IAC IAC" or "IAC SE".
- * Several things may have happened. An IAC was not doubled, the
+ * This is an error. We only expect to get "IAC IAC" or "IAC SE".
+ * Several things may have happened. An IAC was not doubled, the
* IAC SE was left off, or another option got inserted into the
- * suboption are all possibilities. If we assume that the IAC was
+ * suboption are all possibilities. If we assume that the IAC was
* not doubled, and really the IAC SE was left off, we could get
- * into an infinite loop here. So, instead, we terminate the
+ * into an infinite loop here. So, instead, we terminate the
* suboption, and process the partial suboption if we can.
*/
CURL_SB_ACCUM(tn, CURL_IAC);
@@ -1370,7 +1370,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
else use the old WaitForMultipleObjects() way */
if(GetFileType(stdin_handle) == FILE_TYPE_PIPE ||
data->set.is_fread_set) {
- /* Don't wait for stdin_handle, just wait for event_handle */
+ /* Do not wait for stdin_handle, just wait for event_handle */
obj_count = 1;
/* Check stdin_handle per 100 milliseconds */
wait_timeout = 100;
@@ -1470,7 +1470,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
if(events.lNetworkEvents & FD_READ) {
/* read data from network */
result = Curl_xfer_recv(data, buffer, sizeof(buffer), &nread);
- /* read would've blocked. Loop again */
+ /* read would have blocked. Loop again */
if(result == CURLE_AGAIN)
break;
/* returned not-zero, this an error */
@@ -1492,7 +1492,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
}
/* Negotiate if the peer has started negotiating,
- otherwise don't. We don't want to speak telnet with
+ otherwise do not. We do not want to speak telnet with
non-telnet servers, like POP or SMTP. */
if(tn->please_negotiate && !tn->already_negotiated) {
negotiate(data);
@@ -1544,7 +1544,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
while(keepon) {
DEBUGF(infof(data, "telnet_do, poll %d fds", poll_cnt));
- switch(Curl_poll(pfd, poll_cnt, interval_ms)) {
+ switch(Curl_poll(pfd, (unsigned int)poll_cnt, interval_ms)) {
case -1: /* error, stop reading */
keepon = FALSE;
continue;
@@ -1556,7 +1556,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
if(pfd[0].revents & POLLIN) {
/* read data from network */
result = Curl_xfer_recv(data, buffer, sizeof(buffer), &nread);
- /* read would've blocked. Loop again */
+ /* read would have blocked. Loop again */
if(result == CURLE_AGAIN)
break;
/* returned not-zero, this an error */
@@ -1588,7 +1588,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
}
/* Negotiate if the peer has started negotiating,
- otherwise don't. We don't want to speak telnet with
+ otherwise do not. We do not want to speak telnet with
non-telnet servers, like POP or SMTP. */
if(tn->please_negotiate && !tn->already_negotiated) {
negotiate(data);
@@ -1645,7 +1645,7 @@ static CURLcode telnet_do(struct Curl_easy *data, bool *done)
}
#endif
/* mark this as "no further transfer wanted" */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
return result;
}
diff --git a/libs/libcurl/src/tftp.c b/libs/libcurl/src/tftp.c
index 623a2864bc..bc562c8e49 100644
--- a/libs/libcurl/src/tftp.c
+++ b/libs/libcurl/src/tftp.c
@@ -245,7 +245,7 @@ static CURLcode tftp_set_timeouts(struct tftp_state_data *state)
(int)state->state, timeout_ms, state->retry_time, state->retry_max);
/* init RX time */
- time(&state->rx_time);
+ state->rx_time = time(NULL);
return CURLE_OK;
}
@@ -315,7 +315,7 @@ static CURLcode tftp_parse_option_ack(struct tftp_state_data *state,
const char *tmp = ptr;
struct Curl_easy *data = state->data;
- /* if OACK doesn't contain blksize option, the default (512) must be used */
+ /* if OACK does not contain blksize option, the default (512) must be used */
state->blksize = TFTP_BLKSIZE_DEFAULT;
while(tmp < ptr + len) {
@@ -349,7 +349,7 @@ static CURLcode tftp_parse_option_ack(struct tftp_state_data *state,
return CURLE_TFTP_ILLEGAL;
}
else if(blksize > state->requested_blksize) {
- /* could realloc pkt buffers here, but the spec doesn't call out
+ /* could realloc pkt buffers here, but the spec does not call out
* support for the server requesting a bigger blksize than the client
* requests */
failf(data, "%s (%ld)",
@@ -461,7 +461,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
setpacketevent(&state->spacket, TFTP_EVENT_RRQ);
}
/* As RFC3617 describes the separator slash is not actually part of the
- file name so we skip the always-present first letter of the path
+ filename so we skip the always-present first letter of the path
string. */
result = Curl_urldecode(&state->data->state.up.path[1], 0,
&filename, NULL, REJECT_ZERO);
@@ -469,9 +469,9 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
return result;
if(strlen(filename) > (state->blksize - strlen(mode) - 4)) {
- failf(data, "TFTP file name too long");
+ failf(data, "TFTP filename too long");
free(filename);
- return CURLE_TFTP_ILLEGAL; /* too long file name field */
+ return CURLE_TFTP_ILLEGAL; /* too long filename field */
}
msnprintf((char *)state->spacket.data + 2,
@@ -528,7 +528,7 @@ static CURLcode tftp_send_first(struct tftp_state_data *state,
senddata = sendto(state->sockfd, (void *)state->spacket.data,
(SEND_TYPE_ARG3)sbytes, 0,
&data->conn->remote_addr->sa_addr,
- data->conn->remote_addr->addrlen);
+ (curl_socklen_t)data->conn->remote_addr->addrlen);
if(senddata != (ssize_t)sbytes) {
char buffer[STRERROR_LEN];
failf(data, "%s", Curl_strerror(SOCKERRNO, buffer, sizeof(buffer)));
@@ -590,7 +590,7 @@ static CURLcode tftp_rx(struct tftp_state_data *state,
/* Is this the block we expect? */
rblock = getrpacketblock(&state->rpacket);
if(NEXT_BLOCKNUM(state->block) == rblock) {
- /* This is the expected block. Reset counters and ACK it. */
+ /* This is the expected block. Reset counters and ACK it. */
state->retries = 0;
}
else if(state->block == rblock) {
@@ -626,7 +626,7 @@ static CURLcode tftp_rx(struct tftp_state_data *state,
else {
state->state = TFTP_STATE_RX;
}
- time(&state->rx_time);
+ state->rx_time = time(NULL);
break;
case TFTP_EVENT_OACK:
@@ -644,16 +644,16 @@ static CURLcode tftp_rx(struct tftp_state_data *state,
return CURLE_SEND_ERROR;
}
- /* we're ready to RX data */
+ /* we are ready to RX data */
state->state = TFTP_STATE_RX;
- time(&state->rx_time);
+ state->rx_time = time(NULL);
break;
case TFTP_EVENT_TIMEOUT:
/* Increment the retry count and fail if over the limit */
state->retries++;
infof(data,
- "Timeout waiting for block %d ACK. Retries = %d",
+ "Timeout waiting for block %d ACK. Retries = %d",
NEXT_BLOCKNUM(state->block), state->retries);
if(state->retries > state->retry_max) {
state->error = TFTP_ERR_TIMEOUT;
@@ -679,8 +679,8 @@ static CURLcode tftp_rx(struct tftp_state_data *state,
4, SEND_4TH_ARG,
(struct sockaddr *)&state->remote_addr,
state->remote_addrlen);
- /* don't bother with the return code, but if the socket is still up we
- * should be a good TFTP client and let the server know we're done */
+ /* do not bother with the return code, but if the socket is still up we
+ * should be a good TFTP client and let the server know we are done */
state->state = TFTP_STATE_FIN;
break;
@@ -719,13 +719,13 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
int rblock = getrpacketblock(&state->rpacket);
if(rblock != state->block &&
- /* There's a bug in tftpd-hpa that causes it to send us an ack for
- * 65535 when the block number wraps to 0. So when we're expecting
+ /* There is a bug in tftpd-hpa that causes it to send us an ack for
+ * 65535 when the block number wraps to 0. So when we are expecting
* 0, also accept 65535. See
* https://www.syslinux.org/archives/2010-September/015612.html
* */
!(state->block == 0 && rblock == 65535)) {
- /* This isn't the expected block. Log it and up the retry counter */
+ /* This is not the expected block. Log it and up the retry counter */
infof(data, "Received ACK for block %d, expecting %d",
rblock, state->block);
state->retries++;
@@ -738,7 +738,7 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
else {
/* Re-send the data packet */
sbytes = sendto(state->sockfd, (void *)state->spacket.data,
- 4 + state->sbytes, SEND_4TH_ARG,
+ 4 + (SEND_TYPE_ARG3)state->sbytes, SEND_4TH_ARG,
(struct sockaddr *)&state->remote_addr,
state->remote_addrlen);
/* Check all sbytes were sent */
@@ -751,9 +751,9 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
return result;
}
- /* This is the expected packet. Reset the counters and send the next
+ /* This is the expected packet. Reset the counters and send the next
block */
- time(&state->rx_time);
+ state->rx_time = time(NULL);
state->block++;
}
else
@@ -783,7 +783,7 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
} while(state->sbytes < state->blksize && cb);
sbytes = sendto(state->sockfd, (void *) state->spacket.data,
- 4 + state->sbytes, SEND_4TH_ARG,
+ 4 + (SEND_TYPE_ARG3)state->sbytes, SEND_4TH_ARG,
(struct sockaddr *)&state->remote_addr,
state->remote_addrlen);
/* Check all sbytes were sent */
@@ -801,7 +801,7 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
state->retries++;
infof(data, "Timeout waiting for block %d ACK. "
" Retries = %d", NEXT_BLOCKNUM(state->block), state->retries);
- /* Decide if we've had enough */
+ /* Decide if we have had enough */
if(state->retries > state->retry_max) {
state->error = TFTP_ERR_TIMEOUT;
state->state = TFTP_STATE_FIN;
@@ -809,7 +809,7 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
else {
/* Re-send the data packet */
sbytes = sendto(state->sockfd, (void *)state->spacket.data,
- 4 + state->sbytes, SEND_4TH_ARG,
+ 4 + (SEND_TYPE_ARG3)state->sbytes, SEND_4TH_ARG,
(struct sockaddr *)&state->remote_addr,
state->remote_addrlen);
/* Check all sbytes were sent */
@@ -829,8 +829,8 @@ static CURLcode tftp_tx(struct tftp_state_data *state, tftp_event_t event)
(void)sendto(state->sockfd, (void *)state->spacket.data, 4, SEND_4TH_ARG,
(struct sockaddr *)&state->remote_addr,
state->remote_addrlen);
- /* don't bother with the return code, but if the socket is still up we
- * should be a good TFTP client and let the server know we're done */
+ /* do not bother with the return code, but if the socket is still up we
+ * should be a good TFTP client and let the server know we are done */
state->state = TFTP_STATE_FIN;
break;
@@ -1001,7 +1001,7 @@ static CURLcode tftp_connect(struct Curl_easy *data, bool *done)
return CURLE_OUT_OF_MEMORY;
}
- /* we don't keep TFTP connections up basically because there's none or very
+ /* we do not keep TFTP connections up basically because there is no or very
* little gain for UDP */
connclose(conn, "TFTP");
@@ -1032,7 +1032,7 @@ static CURLcode tftp_connect(struct Curl_easy *data, bool *done)
* IPv4 and IPv6...
*/
int rc = bind(state->sockfd, (struct sockaddr *)&state->local_addr,
- conn->remote_addr->addrlen);
+ (curl_socklen_t)conn->remote_addr->addrlen);
if(rc) {
char buffer[STRERROR_LEN];
failf(data, "bind() failed; %s",
@@ -1110,7 +1110,7 @@ static CURLcode tftp_receive_packet(struct Curl_easy *data)
fromlen = sizeof(fromaddr);
state->rbytes = (int)recvfrom(state->sockfd,
(void *)state->rpacket.data,
- state->blksize + 4,
+ (RECV_TYPE_ARG3)state->blksize + 4,
0,
(struct sockaddr *)&fromaddr,
&fromlen);
@@ -1132,7 +1132,7 @@ static CURLcode tftp_receive_packet(struct Curl_easy *data)
switch(state->event) {
case TFTP_EVENT_DATA:
- /* Don't pass to the client empty or retransmitted packets */
+ /* Do not pass to the client empty or retransmitted packets */
if(state->rbytes > 4 &&
(NEXT_BLOCKNUM(state->block) == getrpacketblock(&state->rpacket))) {
result = Curl_client_write(data, CLIENTWRITE_BODY,
@@ -1208,7 +1208,7 @@ static timediff_t tftp_state_timeout(struct Curl_easy *data,
if(current > state->rx_time + state->retry_time) {
if(event)
*event = TFTP_EVENT_TIMEOUT;
- time(&state->rx_time); /* update even though we received nothing */
+ state->rx_time = time(NULL); /* update even though we received nothing */
}
return timeout_ms;
@@ -1241,8 +1241,8 @@ static CURLcode tftp_multi_statemach(struct Curl_easy *data, bool *done)
return result;
*done = (state->state == TFTP_STATE_FIN) ? TRUE : FALSE;
if(*done)
- /* Tell curl we're done */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ /* Tell curl we are done */
+ Curl_xfer_setup_nop(data);
}
else {
/* no timeouts to handle, check our socket */
@@ -1264,8 +1264,8 @@ static CURLcode tftp_multi_statemach(struct Curl_easy *data, bool *done)
return result;
*done = (state->state == TFTP_STATE_FIN) ? TRUE : FALSE;
if(*done)
- /* Tell curl we're done */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ /* Tell curl we are done */
+ Curl_xfer_setup_nop(data);
}
/* if rc == 0, then select() timed out */
}
@@ -1289,7 +1289,7 @@ static CURLcode tftp_doing(struct Curl_easy *data, bool *dophase_done)
DEBUGF(infof(data, "DO phase is complete"));
}
else if(!result) {
- /* The multi code doesn't have this logic for the DOING state so we
+ /* The multi code does not have this logic for the DOING state so we
provide it for TFTP since it may do the entire transfer in this
state. */
if(Curl_pgrsUpdate(data))
@@ -1376,7 +1376,7 @@ static CURLcode tftp_setup_connection(struct Curl_easy *data,
conn->transport = TRNSPRT_UDP;
/* TFTP URLs support an extension like ";mode=<typecode>" that
- * we'll try to get now! */
+ * we will try to get now! */
type = strstr(data->state.up.path, ";mode=");
if(!type)
diff --git a/libs/libcurl/src/timediff.h b/libs/libcurl/src/timediff.h
index 916fefa827..f8d17047c3 100644
--- a/libs/libcurl/src/timediff.h
+++ b/libs/libcurl/src/timediff.h
@@ -26,7 +26,7 @@
#include "curl_setup.h"
-/* Use a larger type even for 32 bit time_t systems so that we can keep
+/* Use a larger type even for 32-bit time_t systems so that we can keep
microsecond accuracy in it */
typedef curl_off_t timediff_t;
#define CURL_FORMAT_TIMEDIFF_T CURL_FORMAT_CURL_OFF_T
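Note: a back-of-the-envelope check of the comment above, to make the sizing concrete. A 32-bit signed counter of microseconds overflows at

    2^31 us = 2,147,483,648 us ~= 2,147 s ~= 35.8 minutes

so timediff_t is typedef'd to the 64-bit curl_off_t to keep microsecond accuracy over long spans even on systems where time_t itself is only 32 bits wide.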
diff --git a/libs/libcurl/src/timeval.c b/libs/libcurl/src/timeval.c
index a15e138e25..5deed16fba 100644
--- a/libs/libcurl/src/timeval.c
+++ b/libs/libcurl/src/timeval.c
@@ -51,8 +51,8 @@ struct curltime Curl_now(void)
#pragma warning(pop)
#endif
- now.tv_sec = milliseconds / 1000;
- now.tv_usec = (milliseconds % 1000) * 1000;
+ now.tv_sec = (time_t)(milliseconds / 1000);
+ now.tv_usec = (int)((milliseconds % 1000) * 1000);
}
return now;
}
@@ -77,7 +77,7 @@ struct curltime Curl_now(void)
/*
** clock_gettime() may be defined by Apple's SDK as weak symbol thus
- ** code compiles but fails during run-time if clock_gettime() is
+ ** code compiles but fails during runtime if clock_gettime() is
** called on unsupported OS version.
*/
#if defined(__APPLE__) && defined(HAVE_BUILTIN_AVAILABLE) && \
@@ -95,7 +95,7 @@ struct curltime Curl_now(void)
#endif
(0 == clock_gettime(CLOCK_MONOTONIC_RAW, &tsnow))) {
cnow.tv_sec = tsnow.tv_sec;
- cnow.tv_usec = (unsigned int)(tsnow.tv_nsec / 1000);
+ cnow.tv_usec = (int)(tsnow.tv_nsec / 1000);
}
else
#endif
@@ -107,18 +107,18 @@ struct curltime Curl_now(void)
#endif
(0 == clock_gettime(CLOCK_MONOTONIC, &tsnow))) {
cnow.tv_sec = tsnow.tv_sec;
- cnow.tv_usec = (unsigned int)(tsnow.tv_nsec / 1000);
+ cnow.tv_usec = (int)(tsnow.tv_nsec / 1000);
}
/*
** Even when the configure process has truly detected monotonic clock
** availability, it might happen that it is not actually available at
- ** run-time. When this occurs simply fallback to other time source.
+ ** runtime. When this occurs simply fallback to other time source.
*/
#ifdef HAVE_GETTIMEOFDAY
else {
(void)gettimeofday(&now, NULL);
cnow.tv_sec = now.tv_sec;
- cnow.tv_usec = (unsigned int)now.tv_usec;
+ cnow.tv_usec = (int)now.tv_usec;
}
#else
else {
diff --git a/libs/libcurl/src/transfer.c b/libs/libcurl/src/transfer.c
index 257932b0fa..b21ada91a5 100644
--- a/libs/libcurl/src/transfer.c
+++ b/libs/libcurl/src/transfer.c
@@ -53,7 +53,7 @@
#endif
#ifndef HAVE_SOCKET
-#error "We can't compile without socket() support!"
+#error "We cannot compile without socket() support!"
#endif
#include "urldata.h"
@@ -160,6 +160,30 @@ bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
return TRUE;
}
+static CURLcode xfer_recv_shutdown(struct Curl_easy *data, bool *done)
+{
+ int sockindex;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+ if(data->conn->sockfd == CURL_SOCKET_BAD)
+ return CURLE_FAILED_INIT;
+ sockindex = (data->conn->sockfd == data->conn->sock[SECONDARYSOCKET]);
+ return Curl_conn_shutdown(data, sockindex, done);
+}
+
+static bool xfer_recv_shutdown_started(struct Curl_easy *data)
+{
+ int sockindex;
+
+ if(!data || !data->conn)
+ return FALSE; /* this helper returns bool, not a CURLcode */
+ if(data->conn->sockfd == CURL_SOCKET_BAD)
+ return FALSE;
+ sockindex = (data->conn->sockfd == data->conn->sock[SECONDARYSOCKET]);
+ return Curl_shutdown_started(data, sockindex);
+}
+
/**
* Receive raw response data for the transfer.
* @param data the transfer
@@ -186,17 +210,35 @@ static ssize_t Curl_xfer_recv_resp(struct Curl_easy *data,
else if(totalleft < (curl_off_t)blen)
blen = (size_t)totalleft;
}
+ else if(xfer_recv_shutdown_started(data)) {
+ /* we already received everything. Do not try more. */
+ blen = 0;
+ }
if(!blen) {
- /* want nothing - continue as if read nothing. */
- DEBUGF(infof(data, "readwrite_data: we're done"));
+ /* want nothing more */
*err = CURLE_OK;
- return 0;
+ nread = 0;
+ }
+ else {
+ *err = Curl_xfer_recv(data, buf, blen, &nread);
}
- *err = Curl_xfer_recv(data, buf, blen, &nread);
if(*err)
return -1;
+ if(nread == 0) {
+ if(data->req.shutdown) {
+ bool done;
+ *err = xfer_recv_shutdown(data, &done);
+ if(*err)
+ return -1;
+ if(!done) {
+ *err = CURLE_AGAIN;
+ return -1;
+ }
+ }
+ DEBUGF(infof(data, "readwrite_data: we are done"));
+ }
DEBUGASSERT(nread >= 0);
return nread;
}
@@ -271,7 +313,10 @@ static CURLcode readwrite_data(struct Curl_easy *data,
DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
else
DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
- k->keepon &= ~(KEEP_RECV|KEEP_SEND); /* stop sending as well */
+ /* stop receiving and ALL sending as well, including PAUSE and HOLD.
+ * We might still be paused on receive client writes though, so
+ * keep those bits around. */
+ k->keepon &= ~(KEEP_RECV|KEEP_SENDBITS);
if(k->eos_written) /* already did write this to client, leave */
break;
}
@@ -291,10 +336,11 @@ static CURLcode readwrite_data(struct Curl_easy *data,
if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV))
break;
- } while(maxloops-- && data_pending(data));
+ } while(maxloops--);
- if(maxloops <= 0) {
- /* did not read until EAGAIN, mark read-again-please */
+ if((maxloops <= 0) || data_pending(data)) {
+ /* did not read until EAGAIN or there is still pending data, mark as
+ read-again-please */
data->state.select_bits = CURL_CSELECT_IN;
if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
data->state.select_bits |= CURL_CSELECT_OUT;
@@ -302,8 +348,8 @@ static CURLcode readwrite_data(struct Curl_easy *data,
if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
(conn->bits.close || is_multiplex)) {
- /* When we've read the entire thing and the close bit is set, the server
- may now close the connection. If there's now any kind of sending going
+ /* When we have read the entire thing and the close bit is set, the server
+ may now close the connection. If there is now any kind of sending going
on from our side, we need to stop that immediately. */
infof(data, "we are done reading and this is set to close, stop send");
k->keepon &= ~KEEP_SEND; /* no writing anymore either */
@@ -317,37 +363,11 @@ out:
return result;
}
-#if defined(_WIN32) && defined(USE_WINSOCK)
-#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
-#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
-#endif
-
-static void win_update_buffer_size(curl_socket_t sockfd)
-{
- int result;
- ULONG ideal;
- DWORD ideallen;
- result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
- &ideal, sizeof(ideal), &ideallen, 0, 0);
- if(result == 0) {
- setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
- (const char *)&ideal, sizeof(ideal));
- }
-}
-#else
-#define win_update_buffer_size(x)
-#endif
-
-#define curl_upload_refill_watermark(data) \
- ((size_t)((data)->set.upload_buffer_size >> 5))
-
/*
* Send data to upload to the server, when the socket is writable.
*/
static CURLcode readwrite_upload(struct Curl_easy *data, int *didwhat)
{
- CURLcode result = CURLE_OK;
-
if((data->req.keepon & KEEP_SEND_PAUSE))
return CURLE_OK;
@@ -358,23 +378,9 @@ static CURLcode readwrite_upload(struct Curl_easy *data, int *didwhat)
if(!Curl_req_done_sending(data)) {
*didwhat |= KEEP_SEND;
- result = Curl_req_send_more(data);
- if(result)
- return result;
-
-#if defined(_WIN32) && defined(USE_WINSOCK)
- /* FIXME: this looks like it would fit better into cf-socket.c
- * but then I do not know enough Windows to say... */
- {
- struct curltime n = Curl_now();
- if(Curl_timediff(n, data->conn->last_sndbuf_update) > 1000) {
- win_update_buffer_size(data->conn->writesockfd);
- data->conn->last_sndbuf_update = n;
- }
- }
-#endif
+ return Curl_req_send_more(data);
}
- return result;
+ return CURLE_OK;
}
static int select_bits_paused(struct Curl_easy *data, int select_bits)
@@ -408,14 +414,6 @@ CURLcode Curl_readwrite(struct Curl_easy *data)
int didwhat = 0;
int select_bits;
- /* Check if client writes had been paused and can resume now. */
- if(!(k->keepon & KEEP_RECV_PAUSE) && Curl_cwriter_is_paused(data)) {
- Curl_conn_ev_data_pause(data, FALSE);
- result = Curl_cwriter_unpause(data);
- if(result)
- goto out;
- }
-
if(data->state.select_bits) {
if(select_bits_paused(data, data->state.select_bits)) {
/* leave the bits unchanged, so they'll tell us what to do when
@@ -522,9 +520,9 @@ CURLcode Curl_readwrite(struct Curl_easy *data)
if(!(data->req.no_body) && (k->size != -1) &&
(k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
- /* Most FTP servers don't adjust their file SIZE response for CRLFs,
- so we'll check to see if the discrepancy can be explained
- by the number of CRLFs we've changed to LFs.
+ /* Most FTP servers do not adjust their file SIZE response for CRLFs,
+ so we will check to see if the discrepancy can be explained
+ by the number of CRLFs we have changed to LFs.
*/
(k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
@@ -569,7 +567,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
CURLcode result;
if(!data->state.url && !data->set.uh) {
- /* we can't do anything without URL */
+ /* we cannot do anything without URL */
failf(data, "No URL set");
return CURLE_URL_MALFORMAT;
}
@@ -593,7 +591,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
}
if(data->set.postfields && data->set.set_resume_from) {
- /* we can't */
+ /* we cannot */
failf(data, "cannot mix POSTFIELDS with RESUME_FROM");
return CURLE_BAD_FUNCTION_ARGUMENT;
}
@@ -695,7 +693,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
/*
* Set user-agent. Used for HTTP, but since we can attempt to tunnel
- * basically anything through an HTTP proxy we can't limit this based on
+ * basically anything through an HTTP proxy we cannot limit this based on
* protocol.
*/
if(data->set.str[STRING_USERAGENT]) {
@@ -727,22 +725,6 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
}
/*
- * Curl_posttransfer() is called immediately after a transfer ends
- */
-CURLcode Curl_posttransfer(struct Curl_easy *data)
-{
-#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
- /* restore the signal handler for SIGPIPE before we get back */
- if(!data->set.no_signal)
- signal(SIGPIPE, data->state.prev_signal);
-#else
- (void)data; /* unused parameter */
-#endif
-
- return CURLE_OK;
-}
-
-/*
* Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
* as given by the remote server and set up the new URL to request.
*
@@ -823,16 +805,16 @@ CURLcode Curl_follow(struct Curl_easy *data,
(data->req.httpcode != 401) && (data->req.httpcode != 407) &&
Curl_is_absolute_url(newurl, NULL, 0, FALSE)) {
/* If this is not redirect due to a 401 or 407 response and an absolute
- URL: don't allow a custom port number */
+ URL: do not allow a custom port number */
disallowport = TRUE;
}
DEBUGASSERT(data->state.uh);
- uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
- (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
- ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
- CURLU_ALLOW_SPACE |
- (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
+ uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl, (unsigned int)
+ ((type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
+ ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
+ CURLU_ALLOW_SPACE |
+ (data->set.path_as_is ? CURLU_PATH_AS_IS : 0)));
if(uc) {
if(type != FOLLOW_FAKE) {
failf(data, "The redirect target URL could not be parsed: %s",
@@ -901,8 +883,8 @@ CURLcode Curl_follow(struct Curl_easy *data,
}
if(type == FOLLOW_FAKE) {
- /* we're only figuring out the new url if we would've followed locations
- but now we're done so we can get out! */
+ /* we are only figuring out the new URL if we would have followed locations
+ but now we are done so we can get out! */
data->info.wouldredirect = newurl;
if(reachedmax) {
@@ -939,15 +921,15 @@ CURLcode Curl_follow(struct Curl_easy *data,
/* 306 - Not used */
/* 307 - Temporary Redirect */
default: /* for all above (and the unknown ones) */
- /* Some codes are explicitly mentioned since I've checked RFC2616 and they
- * seem to be OK to POST to.
+ /* Some codes are explicitly mentioned since I have checked RFC2616 and
+ * they seem to be OK to POST to.
*/
break;
case 301: /* Moved Permanently */
/* (quote from RFC7231, section 6.4.2)
*
* Note: For historical reasons, a user agent MAY change the request
- * method from POST to GET for the subsequent request. If this
+ * method from POST to GET for the subsequent request. If this
* behavior is undesired, the 307 (Temporary Redirect) status code
* can be used instead.
*
@@ -973,7 +955,7 @@ CURLcode Curl_follow(struct Curl_easy *data,
/* (quote from RFC7231, section 6.4.3)
*
* Note: For historical reasons, a user agent MAY change the request
- * method from POST to GET for the subsequent request. If this
+ * method from POST to GET for the subsequent request. If this
* behavior is undesired, the 307 (Temporary Redirect) status code
* can be used instead.
*
@@ -1014,14 +996,14 @@ CURLcode Curl_follow(struct Curl_easy *data,
break;
case 304: /* Not Modified */
/* 304 means we did a conditional request and it was "Not modified".
- * We shouldn't get any Location: header in this response!
+ * We should not get any Location: header in this response!
*/
break;
case 305: /* Use Proxy */
/* (quote from RFC2616, section 10.3.6):
* "The requested resource MUST be accessed through the proxy given
* by the Location field. The Location field gives the URI of the
- * proxy. The recipient is expected to repeat this single request
+ * proxy. The recipient is expected to repeat this single request
* via the proxy. 305 responses MUST only be generated by origin
* servers."
*/
@@ -1043,8 +1025,9 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
bool retry = FALSE;
*url = NULL;
- /* if we're talking upload, we can't do the checks below, unless the protocol
- is HTTP as when uploading over HTTP we will still get a response */
+ /* if we are talking upload, we cannot do the checks below, unless the
+ protocol is HTTP as when uploading over HTTP we will still get a
+ response */
if(data->state.upload &&
!(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
return CURLE_OK;
@@ -1090,7 +1073,7 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
return CURLE_OUT_OF_MEMORY;
connclose(conn, "retry"); /* close this connection */
- conn->bits.retry = TRUE; /* mark this as a connection we're about
+ conn->bits.retry = TRUE; /* mark this as a connection we are about
to retry. Marking it this way should
prevent i.e HTTP transfers to return
error just because nothing has been
@@ -1101,16 +1084,17 @@ CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
}
/*
- * Curl_xfer_setup() is called to setup some basic properties for the
- * upcoming transfer.
+ * xfer_setup() is called to set up basic properties for the transfer.
*/
-void Curl_xfer_setup(
+static void xfer_setup(
struct Curl_easy *data, /* transfer */
int sockindex, /* socket index to read from or -1 */
curl_off_t size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */
- int writesockindex /* socket index to write to, it may very well be
+ int writesockindex, /* socket index to write to, it may very well be
the same we read from. -1 disables */
+ bool shutdown /* shutdown connection at transfer end. Only
+ * supported when sending OR receiving. */
)
{
struct SingleRequest *k = &data->req;
@@ -1120,6 +1104,7 @@ void Curl_xfer_setup(
DEBUGASSERT(conn != NULL);
DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
DEBUGASSERT((writesockindex <= 1) && (writesockindex >= -1));
+ DEBUGASSERT(!shutdown || (sockindex == -1) || (writesockindex == -1));
if(conn->bits.multiplex || conn->httpversion >= 20 || want_send) {
/* when multiplexing, the read/write sockets need to be the same! */
@@ -1137,9 +1122,10 @@ void Curl_xfer_setup(
conn->writesockfd = writesockindex == -1 ?
CURL_SOCKET_BAD:conn->sock[writesockindex];
}
- k->getheader = getheader;
+ k->getheader = getheader;
k->size = size;
+ k->shutdown = shutdown;
/* The code sequence below is placed in this function just because all
necessary input is not always known in do_complete() as this function may
@@ -1150,7 +1136,7 @@ void Curl_xfer_setup(
if(size > 0)
Curl_pgrsSetDownloadSize(data, size);
}
- /* we want header and/or body, if neither then don't do this! */
+ /* we want header and/or body, if neither then do not do this! */
if(k->getheader || !data->req.no_body) {
if(sockindex != -1)
@@ -1162,6 +1148,33 @@ void Curl_xfer_setup(
}
+void Curl_xfer_setup_nop(struct Curl_easy *data)
+{
+ xfer_setup(data, -1, -1, FALSE, -1, FALSE);
+}
+
+void Curl_xfer_setup1(struct Curl_easy *data,
+ int send_recv,
+ curl_off_t recv_size,
+ bool getheader)
+{
+ int recv_index = (send_recv & CURL_XFER_RECV)? FIRSTSOCKET : -1;
+ int send_index = (send_recv & CURL_XFER_SEND)? FIRSTSOCKET : -1;
+ DEBUGASSERT((recv_index >= 0) || (recv_size == -1));
+ xfer_setup(data, recv_index, recv_size, getheader, send_index, FALSE);
+}
+
+void Curl_xfer_setup2(struct Curl_easy *data,
+ int send_recv,
+ curl_off_t recv_size,
+ bool shutdown)
+{
+ int recv_index = (send_recv & CURL_XFER_RECV)? SECONDARYSOCKET : -1;
+ int send_index = (send_recv & CURL_XFER_SEND)? SECONDARYSOCKET : -1;
+ DEBUGASSERT((recv_index >= 0) || (recv_size == -1));
+ xfer_setup(data, recv_index, recv_size, FALSE, send_index, shutdown);
+}
+
CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
const char *buf, size_t blen,
bool is_eos)
@@ -1276,3 +1289,27 @@ CURLcode Curl_xfer_send_close(struct Curl_easy *data)
Curl_conn_ev_data_done_send(data);
return CURLE_OK;
}
+
+CURLcode Curl_xfer_send_shutdown(struct Curl_easy *data, bool *done)
+{
+ int sockindex;
+
+ if(!data || !data->conn)
+ return CURLE_FAILED_INIT;
+ if(data->conn->writesockfd == CURL_SOCKET_BAD)
+ return CURLE_FAILED_INIT;
+ sockindex = (data->conn->writesockfd == data->conn->sock[SECONDARYSOCKET]);
+ return Curl_conn_shutdown(data, sockindex, done);
+}
+
+bool Curl_xfer_is_blocked(struct Curl_easy *data)
+{
+ bool want_send = ((data)->req.keepon & KEEP_SEND);
+ bool want_recv = ((data)->req.keepon & KEEP_RECV);
+ if(!want_send)
+ return (want_recv && Curl_cwriter_is_paused(data));
+ else if(!want_recv)
+ return (want_send && Curl_creader_is_paused(data));
+ else
+ return Curl_creader_is_paused(data) && Curl_cwriter_is_paused(data);
+}
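Note: the shutdown helpers added to transfer.c all derive the socket index the same way, by comparing the transfer's socket against conn->sock[SECONDARYSOCKET], and then forward to Curl_conn_shutdown() or Curl_shutdown_started(). On the receive side, a transfer set up with shutdown=TRUE has k->shutdown recorded, so Curl_xfer_recv_resp() above drives xfer_recv_shutdown() when it hits EOF and keeps returning CURLE_AGAIN until the shutdown completes. A minimal sketch of how a hypothetical caller might drive the send-direction variant to completion (waiting for socket readiness between attempts is assumed to happen elsewhere):

    bool done = FALSE;
    CURLcode result = Curl_xfer_send_shutdown(data, &done);
    while(!result && !done) {
      /* not finished yet (for example a TLS close handshake is pending);
         wait until the write socket is ready again, then retry */
      result = Curl_xfer_send_shutdown(data, &done);
    }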
diff --git a/libs/libcurl/src/transfer.h b/libs/libcurl/src/transfer.h
index 1e9e1a4d69..04adc30fac 100644
--- a/libs/libcurl/src/transfer.h
+++ b/libs/libcurl/src/transfer.h
@@ -32,7 +32,6 @@ char *Curl_checkheaders(const struct Curl_easy *data,
void Curl_init_CONNECT(struct Curl_easy *data);
CURLcode Curl_pretransfer(struct Curl_easy *data);
-CURLcode Curl_posttransfer(struct Curl_easy *data);
typedef enum {
FOLLOW_NONE, /* not used within the function, just a placeholder to
@@ -76,15 +75,37 @@ CURLcode Curl_xfer_write_resp(struct Curl_easy *data,
CURLcode Curl_xfer_write_resp_hd(struct Curl_easy *data,
const char *hd0, size_t hdlen, bool is_eos);
-/* This sets up a forthcoming transfer */
-void Curl_xfer_setup(struct Curl_easy *data,
- int sockindex, /* socket index to read from or -1 */
- curl_off_t size, /* -1 if unknown at this point */
- bool getheader, /* TRUE if header parsing is wanted */
- int writesockindex /* socket index to write to. May be
- the same we read from. -1
- disables */
- );
+#define CURL_XFER_NOP (0)
+#define CURL_XFER_RECV (1<<(0))
+#define CURL_XFER_SEND (1<<(1))
+#define CURL_XFER_SENDRECV (CURL_XFER_RECV|CURL_XFER_SEND)
+
+/**
+ * The transfer is neither receiving nor sending now.
+ */
+void Curl_xfer_setup_nop(struct Curl_easy *data);
+
+/**
+ * The transfer will use socket 1 to send/recv. `recv_size` is
+ * the amount to receive or -1 if unknown. `getheader` indicates
+ * response header processing is expected.
+ */
+void Curl_xfer_setup1(struct Curl_easy *data,
+ int send_recv,
+ curl_off_t recv_size,
+ bool getheader);
+
+/**
+ * The transfer will use socket 2 to send/recv. `recv_size` is
+ * the amount to receive or -1 if unknown. With `shutdown` being
+ * set, the transfer is only allowed to either send OR receive
+ * and the socket 2 connection will be shutdown at the end of
+ * the transfer. An unclean shutdown will fail the transfer.
+ */
+void Curl_xfer_setup2(struct Curl_easy *data,
+ int send_recv,
+ curl_off_t recv_size,
+ bool shutdown);
/**
* Multi has set transfer to DONE. Last chance to trigger
@@ -111,5 +132,13 @@ CURLcode Curl_xfer_recv(struct Curl_easy *data,
ssize_t *pnrcvd);
CURLcode Curl_xfer_send_close(struct Curl_easy *data);
+CURLcode Curl_xfer_send_shutdown(struct Curl_easy *data, bool *done);
+
+/**
+ * Return TRUE iff the transfer is not done, but further progress
+ * is blocked. For example when it is only receiving and its writer
+ * is PAUSED.
+ */
+bool Curl_xfer_is_blocked(struct Curl_easy *data);
#endif /* HEADER_CURL_TRANSFER_H */
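Note: to make the API split above concrete, the removed Curl_xfer_setup() call with -1/-1/FALSE/-1 becomes Curl_xfer_setup_nop(), as the telnet.c and tftp.c hunks earlier in this patch show. The flag and size values in the other two calls below are illustrative only and are not taken from this patch:

    /* previously: Curl_xfer_setup(data, -1, -1, FALSE, -1); */
    Curl_xfer_setup_nop(data);

    /* receive a response of unknown size on the first socket,
       with response header parsing enabled */
    Curl_xfer_setup1(data, CURL_XFER_RECV, -1, TRUE);

    /* receive on the second socket (e.g. an FTP-style data connection)
       and shut that connection down cleanly when the transfer ends */
    Curl_xfer_setup2(data, CURL_XFER_RECV, -1, TRUE);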
diff --git a/libs/libcurl/src/url.c b/libs/libcurl/src/url.c
index aa03c1d676..0f623b955b 100644
--- a/libs/libcurl/src/url.c
+++ b/libs/libcurl/src/url.c
@@ -56,7 +56,7 @@
#endif
#ifndef HAVE_SOCKET
-#error "We can't compile without socket() support!"
+#error "We cannot compile without socket() support!"
#endif
#include <limits.h>
@@ -136,7 +136,7 @@ static void data_priority_cleanup(struct Curl_easy *data);
#endif
/* Some parts of the code (e.g. chunked encoding) assume this buffer has at
- * more than just a few bytes to play with. Don't let it become too small or
+ * more than just a few bytes to play with. Do not let it become too small or
* bad things will happen.
*/
#if READBUFFER_SIZE < READBUFFER_MIN
@@ -260,7 +260,7 @@ CURLcode Curl_close(struct Curl_easy **datap)
if(data->state.rangestringalloc)
free(data->state.range);
- /* freed here just in case DONE wasn't called */
+ /* freed here just in case DONE was not called */
Curl_req_free(&data->req, data);
/* Close down all open SSL info and sessions */
@@ -365,7 +365,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data)
set->seek_client = ZERO_NULL;
- set->filesize = -1; /* we don't know the size */
+ set->filesize = -1; /* we do not know the size */
set->postfieldsize = -1; /* unknown size */
set->maxredirs = 30; /* sensible default */
@@ -467,6 +467,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data)
set->tcp_keepalive = FALSE;
set->tcp_keepintvl = 60;
set->tcp_keepidle = 60;
+ set->tcp_keepcnt = 9;
set->tcp_fastopen = FALSE;
set->tcp_nodelay = TRUE;
set->ssl_enable_alpn = TRUE;
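Note: the new tcp_keepcnt default of 9 above joins the keep-alive settings already initialized here. Assuming the matching CURLOPT_TCP_KEEPCNT easy option is exposed alongside this new default (the other three options shown are long-standing libcurl options), application code mirroring these defaults might look like:

    /* hypothetical application code mirroring the defaults above */
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 60L);
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 60L);
    curl_easy_setopt(curl, CURLOPT_TCP_KEEPCNT, 9L);  /* assumed new option */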
@@ -555,19 +556,7 @@ CURLcode Curl_open(struct Curl_easy **curl)
return result;
}
-static void conn_shutdown(struct Curl_easy *data)
-{
- DEBUGASSERT(data);
- infof(data, "Closing connection");
-
- /* possible left-overs from the async name resolvers */
- Curl_resolver_cancel(data);
-
- Curl_conn_close(data, SECONDARYSOCKET);
- Curl_conn_close(data, FIRSTSOCKET);
-}
-
-static void conn_free(struct Curl_easy *data, struct connectdata *conn)
+void Curl_conn_free(struct Curl_easy *data, struct connectdata *conn)
{
size_t i;
@@ -594,8 +583,8 @@ static void conn_free(struct Curl_easy *data, struct connectdata *conn)
Curl_safefree(conn->sasl_authzid);
Curl_safefree(conn->options);
Curl_safefree(conn->oauth_bearer);
- Curl_safefree(conn->host.rawalloc); /* host name buffer */
- Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
+ Curl_safefree(conn->host.rawalloc); /* hostname buffer */
+ Curl_safefree(conn->conn_to_host.rawalloc); /* hostname buffer */
Curl_safefree(conn->hostname_resolve);
Curl_safefree(conn->secondaryhostname);
Curl_safefree(conn->localdev);
@@ -618,12 +607,10 @@ static void conn_free(struct Curl_easy *data, struct connectdata *conn)
* disassociated from an easy handle.
*
* This function MUST NOT reset state in the Curl_easy struct if that
- * isn't strictly bound to the life-time of *this* particular connection.
- *
+ * is not strictly bound to the life-time of *this* particular connection.
*/
-
void Curl_disconnect(struct Curl_easy *data,
- struct connectdata *conn, bool dead_connection)
+ struct connectdata *conn, bool aborted)
{
/* there must be a connection to close */
DEBUGASSERT(conn);
@@ -638,13 +625,14 @@ void Curl_disconnect(struct Curl_easy *data,
DEBUGASSERT(!data->conn);
DEBUGF(infof(data, "Curl_disconnect(conn #%"
- CURL_FORMAT_CURL_OFF_T ", dead=%d)",
- conn->connection_id, dead_connection));
+ CURL_FORMAT_CURL_OFF_T ", aborted=%d)",
+ conn->connection_id, aborted));
+
/*
- * If this connection isn't marked to force-close, leave it open if there
+ * If this connection is not marked to force-close, leave it open if there
* are other users of it
*/
- if(CONN_INUSE(conn) && !dead_connection) {
+ if(CONN_INUSE(conn) && !aborted) {
DEBUGF(infof(data, "Curl_disconnect when inuse: %zu", CONN_INUSE(conn)));
return;
}
@@ -661,23 +649,10 @@ void Curl_disconnect(struct Curl_easy *data,
Curl_http_auth_cleanup_negotiate(conn);
if(conn->connect_only)
- /* treat the connection as dead in CONNECT_ONLY situations */
- dead_connection = TRUE;
-
- /* temporarily attach the connection to this transfer handle for the
- disconnect and shutdown */
- Curl_attach_connection(data, conn);
-
- if(conn->handler && conn->handler->disconnect)
- /* This is set if protocol-specific cleanups should be made */
- conn->handler->disconnect(data, conn, dead_connection);
+ /* treat the connection as aborted in CONNECT_ONLY situations */
+ aborted = TRUE;
- conn_shutdown(data);
-
- /* detach it again */
- Curl_detach_connection(data);
-
- conn_free(data, conn);
+ Curl_conncache_disconnect(data, conn, aborted);
}
/*
@@ -735,7 +710,7 @@ socks_proxy_info_matches(const struct proxy_info *data,
return TRUE;
}
#else
-/* disabled, won't get called */
+/* disabled, will not get called */
#define proxy_info_matches(x,y) FALSE
#define socks_proxy_info_matches(x,y) FALSE
#endif
@@ -786,7 +761,7 @@ static bool prune_if_dead(struct connectdata *conn,
struct Curl_easy *data)
{
if(!CONN_INUSE(conn)) {
- /* The check for a dead socket makes sense only if the connection isn't in
+ /* The check for a dead socket makes sense only if the connection is not in
use */
bool dead;
struct curltime now = Curl_now();
@@ -823,6 +798,7 @@ static bool prune_if_dead(struct connectdata *conn,
* any time (HTTP/2 PING for example), the protocol handler needs
* to install its own `connection_check` callback.
*/
+ DEBUGF(infof(data, "connection has input pending, not reusable"));
dead = TRUE;
}
Curl_detach_connection(data);
@@ -880,8 +856,8 @@ static void prune_dead_connections(struct Curl_easy *data)
/* connection previously removed from cache in prune_if_dead() */
- /* disconnect it */
- Curl_disconnect(data, pruned, TRUE);
+ /* disconnect it, do not treat as aborted */
+ Curl_disconnect(data, pruned, FALSE);
}
CONNCACHE_LOCK(data);
data->state.conn_cache->last_cleanup = now;
@@ -961,12 +937,12 @@ ConnectionExists(struct Curl_easy *data,
if(IsMultiplexingPossible(data, needle)) {
if(bundle->multiuse == BUNDLE_UNKNOWN) {
if(data->set.pipewait) {
- infof(data, "Server doesn't support multiplex yet, wait");
+ infof(data, "Server does not support multiplex yet, wait");
*waitpipe = TRUE;
CONNCACHE_UNLOCK(data);
return FALSE; /* no reuse */
}
- infof(data, "Server doesn't support multiplex (yet)");
+ infof(data, "Server does not support multiplex (yet)");
}
else if(bundle->multiuse == BUNDLE_MULTIPLEX) {
if(Curl_multiplex_wanted(data->multi))
@@ -1001,15 +977,15 @@ ConnectionExists(struct Curl_easy *data,
if(!canmultiplex) {
if(Curl_resolver_asynch() &&
- /* remote_ip[0] is NUL only if the resolving of the name hasn't
- completed yet and until then we don't reuse this connection */
+ /* remote_ip[0] is NUL only if the resolving of the name has not
+ completed yet and until then we do not reuse this connection */
!check->primary.remote_ip[0])
continue;
}
if(CONN_INUSE(check)) {
if(!canmultiplex) {
- /* transfer can't be multiplexed and check is in use */
+ /* transfer cannot be multiplexed and check is in use */
continue;
}
else {
@@ -1023,9 +999,9 @@ ConnectionExists(struct Curl_easy *data,
if(!Curl_conn_is_connected(check, FIRSTSOCKET)) {
foundPendingCandidate = TRUE;
- /* Don't pick a connection that hasn't connected yet */
+ /* Do not pick a connection that has not connected yet */
infof(data, "Connection #%" CURL_FORMAT_CURL_OFF_T
- " isn't open enough, can't reuse", check->connection_id);
+ " is not open enough, cannot reuse", check->connection_id);
continue;
}
@@ -1050,20 +1026,20 @@ ConnectionExists(struct Curl_easy *data,
if((needle->handler->flags&PROTOPT_SSL) !=
(check->handler->flags&PROTOPT_SSL))
- /* don't do mixed SSL and non-SSL connections */
+ /* do not do mixed SSL and non-SSL connections */
if(get_protocol_family(check->handler) !=
needle->handler->protocol || !check->bits.tls_upgraded)
/* except protocols that have been upgraded via TLS */
continue;
if(needle->bits.conn_to_host != check->bits.conn_to_host)
- /* don't mix connections that use the "connect to host" feature and
- * connections that don't use this feature */
+ /* do not mix connections that use the "connect to host" feature and
+ * connections that do not use this feature */
continue;
if(needle->bits.conn_to_port != check->bits.conn_to_port)
- /* don't mix connections that use the "connect to port" feature and
- * connections that don't use this feature */
+ /* do not mix connections that use the "connect to port" feature and
+ * connections that do not use this feature */
continue;
#ifndef CURL_DISABLE_PROXY
@@ -1091,7 +1067,7 @@ ConnectionExists(struct Curl_easy *data,
if(!Curl_ssl_conn_config_match(data, check, TRUE)) {
DEBUGF(infof(data,
"Connection #%" CURL_FORMAT_CURL_OFF_T
- " has different SSL proxy parameters, can't reuse",
+ " has different SSL proxy parameters, cannot reuse",
check->connection_id));
continue;
}
@@ -1103,18 +1079,18 @@ ConnectionExists(struct Curl_easy *data,
if(h2upgrade && !check->httpversion && canmultiplex) {
if(data->set.pipewait) {
- infof(data, "Server upgrade doesn't support multiplex yet, wait");
+ infof(data, "Server upgrade does not support multiplex yet, wait");
*waitpipe = TRUE;
CONNCACHE_UNLOCK(data);
return FALSE; /* no reuse */
}
infof(data, "Server upgrade cannot be used");
- continue; /* can't be used atm */
+ continue; /* cannot be used atm */
}
if(needle->localdev || needle->localport) {
/* If we are bound to a specific local end (IP+port), we must not
- reuse a random other one, although if we didn't ask for a
+ reuse a random other one, although if we did not ask for a
particular one we can reuse one that was bound.
This comparison is a bit rough and too strict. Since the input
@@ -1122,7 +1098,7 @@ ConnectionExists(struct Curl_easy *data,
same it would take a lot of processing to make it really accurate.
Instead, this matching will assume that reuses of bound connections
will most likely also reuse the exact same binding parameters and
- missing out a few edge cases shouldn't hurt anyone very much.
+ missing out a few edge cases should not hurt anyone very much.
*/
if((check->localport != needle->localport) ||
(check->localportrange != needle->localportrange) ||
@@ -1133,7 +1109,7 @@ ConnectionExists(struct Curl_easy *data,
if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
/* This protocol requires credentials per connection,
- so verify that we're using the same name and password as well */
+ so verify that we are using the same name and password as well */
if(Curl_timestrcmp(needle->user, check->user) ||
Curl_timestrcmp(needle->passwd, check->passwd) ||
Curl_timestrcmp(needle->sasl_authzid, check->sasl_authzid) ||
@@ -1176,7 +1152,7 @@ ConnectionExists(struct Curl_easy *data,
#endif
/* Additional match requirements if talking TLS OR
- * not talking to a HTTP proxy OR using a tunnel through a proxy */
+ * not talking to an HTTP proxy OR using a tunnel through a proxy */
if((needle->handler->flags&PROTOPT_SSL)
#ifndef CURL_DISABLE_PROXY
|| !needle->bits.httpproxy || needle->bits.tunnel_proxy
@@ -1206,7 +1182,7 @@ ConnectionExists(struct Curl_easy *data,
!Curl_ssl_conn_config_match(data, check, FALSE)) {
DEBUGF(infof(data,
"Connection #%" CURL_FORMAT_CURL_OFF_T
- " has different SSL parameters, can't reuse",
+ " has different SSL parameters, cannot reuse",
check->connection_id));
continue;
}
@@ -1230,7 +1206,7 @@ ConnectionExists(struct Curl_easy *data,
}
}
else if(check->http_ntlm_state != NTLMSTATE_NONE) {
- /* Connection is using NTLM auth but we don't want NTLM */
+ /* Connection is using NTLM auth but we do not want NTLM */
continue;
}
@@ -1249,7 +1225,7 @@ ConnectionExists(struct Curl_easy *data,
continue;
}
else if(check->proxy_ntlm_state != NTLMSTATE_NONE) {
- /* Proxy connection is using NTLM auth but we don't want NTLM */
+ /* Proxy connection is using NTLM auth but we do not want NTLM */
continue;
}
#endif
@@ -1276,7 +1252,7 @@ ConnectionExists(struct Curl_easy *data,
if(CONN_INUSE(check)) {
DEBUGASSERT(canmultiplex);
DEBUGASSERT(check->bits.multiplex);
- /* If multiplexed, make sure we don't go over concurrency limit */
+ /* If multiplexed, make sure we do not go over concurrency limit */
if(CONN_INUSE(check) >=
Curl_multi_max_concurrent_streams(data->multi)) {
infof(data, "client side MAX_CONCURRENT_STREAMS reached"
@@ -1293,8 +1269,8 @@ ConnectionExists(struct Curl_easy *data,
infof(data, "Multiplexed connection found");
}
else if(prune_if_dead(check, data)) {
- /* disconnect it */
- Curl_disconnect(data, check, TRUE);
+ /* disconnect it, do not treat as aborted */
+ Curl_disconnect(data, check, FALSE);
continue;
}
@@ -1357,7 +1333,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
conn->primary.remote_port = -1; /* unknown at this point */
conn->remote_port = -1; /* unknown at this point */
- /* Default protocol-independent behavior doesn't support persistent
+ /* Default protocol-independent behavior does not support persistent
connections, so we set this to force-close. Protocols that support
this need to set this to FALSE in their "curl_do" functions. */
connclose(conn, "Default to force-close");
@@ -1643,7 +1619,7 @@ const struct Curl_handler *Curl_getn_scheme_handler(const char *scheme,
unsigned int c = 978;
while(l) {
c <<= 5;
- c += Curl_raw_tolower(*s);
+ c += (unsigned int)Curl_raw_tolower(*s);
s++;
l--;
}
@@ -1678,7 +1654,7 @@ static CURLcode findprotocol(struct Curl_easy *data,
}
}
- /* The protocol was not found in the table, but we don't have to assign it
+ /* The protocol was not found in the table, but we do not have to assign it
to anything since it is already assigned to a dummy-struct in the
create_conn() function when the connectdata struct is allocated. */
failf(data, "Protocol \"%s\" %s%s", protostr,
@@ -1796,12 +1772,12 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
if(!use_set_uh) {
char *newurl;
- uc = curl_url_set(uh, CURLUPART_URL, data->state.url,
- CURLU_GUESS_SCHEME |
- CURLU_NON_SUPPORT_SCHEME |
- (data->set.disallow_username_in_url ?
- CURLU_DISALLOW_USER : 0) |
- (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
+ uc = curl_url_set(uh, CURLUPART_URL, data->state.url, (unsigned int)
+ (CURLU_GUESS_SCHEME |
+ CURLU_NON_SUPPORT_SCHEME |
+ (data->set.disallow_username_in_url ?
+ CURLU_DISALLOW_USER : 0) |
+ (data->set.path_as_is ? CURLU_PATH_AS_IS : 0)));
if(uc) {
failf(data, "URL rejected: %s", curl_url_strerror(uc));
return Curl_uc_to_curlcode(uc);
@@ -1827,7 +1803,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
return CURLE_OUT_OF_MEMORY;
}
else if(strlen(data->state.up.hostname) > MAX_URL_LEN) {
- failf(data, "Too long host name (maximum is %d)", MAX_URL_LEN);
+ failf(data, "Too long hostname (maximum is %d)", MAX_URL_LEN);
return CURLE_URL_MALFORMAT;
}
hostname = data->state.up.hostname;
@@ -1845,7 +1821,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
zonefrom_url(uh, data, conn);
}
- /* make sure the connect struct gets its own copy of the host name */
+ /* make sure the connect struct gets its own copy of the hostname */
conn->host.rawalloc = strdup(hostname ? hostname : "");
if(!conn->host.rawalloc)
return CURLE_OUT_OF_MEMORY;
@@ -1892,7 +1868,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
return result;
/*
- * User name and password set with their own options override the
+ * Username and password set with their own options override the
* credentials possibly set in the URL.
*/
if(!data->set.str[STRING_PASSWORD]) {
@@ -1914,7 +1890,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
}
if(!data->set.str[STRING_USERNAME]) {
- /* we don't use the URL API's URL decoder option here since it rejects
+ /* we do not use the URL API's URL decoder option here since it rejects
control codes and we want to allow them for some schemes in the user
and password fields */
uc = curl_url_get(uh, CURLUPART_USER, &data->state.up.user, 0);
@@ -1979,7 +1955,7 @@ static CURLcode parseurlandfillconn(struct Curl_easy *data,
/*
- * If we're doing a resumed transfer, we need to setup our stuff
+ * If we are doing a resumed transfer, we need to setup our stuff
* properly.
*/
static CURLcode setup_range(struct Curl_easy *data)
@@ -2074,7 +2050,7 @@ static char *detect_proxy(struct Curl_easy *data,
* the first to check for.)
*
* For compatibility, the all-uppercase versions of these variables are
- * checked if the lowercase versions don't exist.
+ * checked if the lowercase versions do not exist.
*/
char proxy_env[20];
char *envp = proxy_env;
@@ -2088,7 +2064,7 @@ static char *detect_proxy(struct Curl_easy *data,
proxy = curl_getenv(proxy_env);
/*
- * We don't try the uppercase version of HTTP_PROXY because of
+ * We do not try the uppercase version of HTTP_PROXY because of
* security reasons:
*
* When curl is used in a webserver application
@@ -2137,7 +2113,7 @@ static char *detect_proxy(struct Curl_easy *data,
/*
* If this is supposed to use a proxy, we need to figure out the proxy
- * host name, so that we can reuse an existing connection
+ * hostname, so that we can reuse an existing connection
* that may exist registered to the same proxy host.
*/
static CURLcode parse_proxy(struct Curl_easy *data,
@@ -2284,7 +2260,7 @@ static CURLcode parse_proxy(struct Curl_easy *data,
conn->primary.remote_port = port;
}
- /* now, clone the proxy host name */
+ /* now, clone the proxy hostname */
uc = curl_url_get(uhp, CURLUPART_HOST, &host, CURLU_URLDECODE);
if(uc) {
result = CURLE_OUT_OF_MEMORY;
@@ -2374,7 +2350,6 @@ static CURLcode create_conn_helper_init_proxy(struct Curl_easy *data,
char *socksproxy = NULL;
char *no_proxy = NULL;
CURLcode result = CURLE_OK;
- bool spacesep = FALSE;
/*************************************************************
* Extract the user and password from the authentication string
@@ -2421,8 +2396,7 @@ static CURLcode create_conn_helper_init_proxy(struct Curl_easy *data,
}
if(Curl_check_noproxy(conn->host.name, data->set.str[STRING_NOPROXY] ?
- data->set.str[STRING_NOPROXY] : no_proxy,
- &spacesep)) {
+ data->set.str[STRING_NOPROXY] : no_proxy)) {
Curl_safefree(proxy);
Curl_safefree(socksproxy);
}
@@ -2431,9 +2405,6 @@ static CURLcode create_conn_helper_init_proxy(struct Curl_easy *data,
/* if the host is not in the noproxy list, detect proxy. */
proxy = detect_proxy(data, conn);
#endif /* CURL_DISABLE_HTTP */
- if(spacesep)
- infof(data, "space-separated NOPROXY patterns are deprecated");
-
Curl_safefree(no_proxy);
#ifdef USE_UNIX_SOCKETS
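
The removed spacesep plumbing above belonged to the old space-separated NOPROXY handling; the exclusion list is expressed as comma-separated patterns. A brief sketch at the libcurl option level (the proxy URL and patterns are placeholders, and the easy handle is assumed to be initialized elsewhere):

/* Minimal sketch of setting a proxy plus a comma-separated exclusion
 * list; the proxy URL and patterns below are placeholders. */
#include <curl/curl.h>

static void setup_proxy(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_URL, "https://www.example.org/");
  curl_easy_setopt(curl, CURLOPT_PROXY, "http://proxy.example.com:3128");
  /* hosts matching these patterns bypass the proxy */
  curl_easy_setopt(curl, CURLOPT_NOPROXY, "localhost,.example.org");
  /* the http_proxy/https_proxy/no_proxy environment variables are only
     consulted when CURLOPT_PROXY is not set explicitly */
}
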
@@ -2445,14 +2416,14 @@ static CURLcode create_conn_helper_init_proxy(struct Curl_easy *data,
#endif
if(proxy && (!*proxy || (conn->handler->flags & PROTOPT_NONETWORK))) {
- free(proxy); /* Don't bother with an empty proxy string or if the
- protocol doesn't work with network */
+ free(proxy); /* Do not bother with an empty proxy string or if the
+ protocol does not work with network */
proxy = NULL;
}
if(socksproxy && (!*socksproxy ||
(conn->handler->flags & PROTOPT_NONETWORK))) {
- free(socksproxy); /* Don't bother with an empty socks proxy string or if
- the protocol doesn't work with network */
+ free(socksproxy); /* Do not bother with an empty socks proxy string or if
+ the protocol does not work with network */
socksproxy = NULL;
}
@@ -2524,7 +2495,7 @@ static CURLcode create_conn_helper_init_proxy(struct Curl_easy *data,
conn->bits.proxy = conn->bits.httpproxy || conn->bits.socksproxy;
if(!conn->bits.proxy) {
- /* we aren't using the proxy after all... */
+ /* we are not using the proxy after all... */
conn->bits.proxy = FALSE;
conn->bits.httpproxy = FALSE;
conn->bits.socksproxy = FALSE;
@@ -2546,7 +2517,7 @@ out:
/*
* Curl_parse_login_details()
*
- * This is used to parse a login string for user name, password and options in
+ * This is used to parse a login string for username, password and options in
* the following formats:
*
* user
@@ -2693,7 +2664,7 @@ static CURLcode override_login(struct Curl_easy *data,
bool url_provided = FALSE;
if(data->state.aptr.user) {
- /* there was a user name in the URL. Use the URL decoded version */
+ /* there was a username in the URL. Use the URL decoded version */
userp = &data->state.aptr.user;
url_provided = TRUE;
}
@@ -2774,7 +2745,7 @@ static CURLcode override_login(struct Curl_easy *data,
}
/*
- * Set the login details so they're available in the connection
+ * Set the login details so they are available in the connection
*/
static CURLcode set_login(struct Curl_easy *data,
struct connectdata *conn)
@@ -2865,8 +2836,8 @@ static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
else
infof(data, "Invalid IPv6 address format");
portptr = ptr;
- /* Note that if this didn't end with a bracket, we still advanced the
- * hostptr first, but I can't see anything wrong with that as no host
+ /* Note that if this did not end with a bracket, we still advanced the
+ * hostptr first, but I cannot see anything wrong with that as no host
* name nor a numeric can legally start with a bracket.
*/
#else
@@ -2880,7 +2851,7 @@ static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
host_portno = strchr(portptr, ':');
if(host_portno) {
char *endp = NULL;
- *host_portno = '\0'; /* cut off number from host name */
+ *host_portno = '\0'; /* cut off number from hostname */
host_portno++;
if(*host_portno) {
long portparse = strtol(host_portno, &endp, 10);
@@ -2895,7 +2866,7 @@ static CURLcode parse_connect_to_host_port(struct Curl_easy *data,
}
}
- /* now, clone the cleaned host name */
+ /* now, clone the cleaned hostname */
DEBUGASSERT(hostptr);
*hostname_result = strdup(hostptr);
if(!*hostname_result) {
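
parse_connect_to_host_port() above consumes entries supplied via CURLOPT_CONNECT_TO, each of the form "HOST:PORT:CONNECT-TO-HOST:CONNECT-TO-PORT" (IPv6 addresses in brackets). A short sketch with placeholder hostnames:

/* Sketch of the option that feeds parse_connect_to_host_port(); the
 * hostnames below are placeholders. */
#include <curl/curl.h>

static CURLcode fetch_via_connect_to(CURL *curl)
{
  struct curl_slist *connect_to = NULL;
  CURLcode result;

  /* requests for www.example.com:443 actually connect to the staging host */
  connect_to = curl_slist_append(connect_to,
                                 "www.example.com:443:staging.example.net:8443");
  curl_easy_setopt(curl, CURLOPT_URL, "https://www.example.com/");
  curl_easy_setopt(curl, CURLOPT_CONNECT_TO, connect_to);

  result = curl_easy_perform(curl);
  curl_slist_free_all(connect_to);
  return result;
}
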
@@ -3028,7 +2999,7 @@ static CURLcode parse_connect_to_slist(struct Curl_easy *data,
#ifndef CURL_DISABLE_ALTSVC
if(data->asi && !host && (port == -1) &&
((conn->handler->protocol == CURLPROTO_HTTPS) ||
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
/* allow debug builds to circumvent the HTTPS restriction */
getenv("CURL_ALTSVC_HTTP")
#else
@@ -3091,7 +3062,7 @@ static CURLcode parse_connect_to_slist(struct Curl_easy *data,
conn->transport = TRNSPRT_QUIC;
conn->httpversion = 30;
break;
- default: /* shouldn't be possible */
+ default: /* should not be possible */
break;
}
}
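
The switch above maps an Alt-Svc entry's ALPN id to the transport of the new connection (h3 becomes TRNSPRT_QUIC / HTTP/3). Enabling the cache that feeds it is done per handle; a hedged sketch with a placeholder cache filename:

/* Sketch of enabling the Alt-Svc cache that parse_connect_to_slist()
 * consults; the cache filename is a placeholder. */
#include <curl/curl.h>

static void enable_altsvc(CURL *curl)
{
  /* persist learned Alt-Svc entries between runs */
  curl_easy_setopt(curl, CURLOPT_ALTSVC, "altsvc-cache.txt");
  /* allow switching to these ALPN ids when the origin advertises them */
  curl_easy_setopt(curl, CURLOPT_ALTSVC_CTRL,
                   (long)(CURLALTSVC_H1 | CURLALTSVC_H2 | CURLALTSVC_H3));
}
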
@@ -3254,7 +3225,7 @@ static CURLcode resolve_server(struct Curl_easy *data,
/* Resolve the name of the server or proxy */
if(conn->bits.reuse) {
- /* We're reusing the connection - no need to resolve anything, and
+ /* We are reusing the connection - no need to resolve anything, and
idnconvert_hostname() was called already in create_conn() for the reuse
case. */
*async = FALSE;
@@ -3266,7 +3237,7 @@ static CURLcode resolve_server(struct Curl_easy *data,
/*
* Cleanup the connection `temp`, just allocated for `data`, before using the
- * previously `existing` one for `data`. All relevant info is copied over
+ * previously `existing` one for `data`. All relevant info is copied over
* and `temp` is freed.
*/
static void reuse_conn(struct Curl_easy *data,
@@ -3276,7 +3247,7 @@ static void reuse_conn(struct Curl_easy *data,
/* get the user+password information from the temp struct since it may
* be new for this request even when we reuse an existing connection */
if(temp->user) {
- /* use the new user name and password though */
+ /* use the new username and password though */
Curl_safefree(existing->user);
Curl_safefree(existing->passwd);
existing->user = temp->user;
@@ -3288,7 +3259,7 @@ static void reuse_conn(struct Curl_easy *data,
#ifndef CURL_DISABLE_PROXY
existing->bits.proxy_user_passwd = temp->bits.proxy_user_passwd;
if(existing->bits.proxy_user_passwd) {
- /* use the new proxy user name and proxy password though */
+ /* use the new proxy username and proxy password though */
Curl_safefree(existing->http_proxy.user);
Curl_safefree(existing->socks_proxy.user);
Curl_safefree(existing->http_proxy.passwd);
@@ -3335,14 +3306,14 @@ static void reuse_conn(struct Curl_easy *data,
temp->hostname_resolve = NULL;
/* reuse init */
- existing->bits.reuse = TRUE; /* yes, we're reusing here */
+ existing->bits.reuse = TRUE; /* yes, we are reusing here */
- conn_free(data, temp);
+ Curl_conn_free(data, temp);
}
/**
* create_conn() sets up a new connectdata struct, or reuses an already
- * existing one, and resolves host name.
+ * existing one, and resolves hostname.
*
* if this function returns CURLE_OK and *async is set to TRUE, the resolve
* response will be coming asynchronously. If *async is FALSE, the name is
@@ -3524,7 +3495,7 @@ static CURLcode create_conn(struct Curl_easy *data,
goto out;
/***********************************************************************
- * file: is a special case in that it doesn't need a network connection
+ * file: is a special case in that it does not need a network connection
***********************************************************************/
#ifndef CURL_DISABLE_FILE
if(conn->handler->flags & PROTOPT_NONETWORK) {
@@ -3535,7 +3506,7 @@ static CURLcode create_conn(struct Curl_easy *data,
Curl_persistconninfo(data, conn, NULL);
result = conn->handler->connect_it(data, &done);
- /* Setup a "faked" transfer that'll do nothing */
+ /* Setup a "faked" transfer that will do nothing */
if(!result) {
Curl_attach_connection(data, conn);
result = Curl_conncache_add_conn(data);
@@ -3552,7 +3523,7 @@ static CURLcode create_conn(struct Curl_easy *data,
(void)conn->handler->done(data, result, FALSE);
goto out;
}
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
}
/* since we skip do_init() */
@@ -3667,16 +3638,22 @@ static CURLcode create_conn(struct Curl_easy *data,
conn_candidate = Curl_conncache_extract_oldest(data);
if(conn_candidate)
Curl_disconnect(data, conn_candidate, FALSE);
- else {
- infof(data, "No connections available in cache");
- connections_available = FALSE;
- }
+ else
+#ifndef CURL_DISABLE_DOH
+ if(data->set.dohfor)
+ infof(data, "Allowing DoH to override max connection limit");
+ else
+#endif
+ {
+ infof(data, "No connections available in cache");
+ connections_available = FALSE;
+ }
}
if(!connections_available) {
infof(data, "No connections available.");
- conn_free(data, conn);
+ Curl_conn_free(data, conn);
*in_connect = NULL;
result = CURLE_NO_CONNECTION_AVAILABLE;
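
The new branch above lets a DoH sub-transfer bypass the connection-cache limit so that name resolution is not starved by CURLOPT_MAXCONNECTS. At the application level DoH is opted into per handle; a brief sketch with an example resolver URL:

/* Sketch of enabling DNS-over-HTTPS for a transfer; the resolver URL is
 * only an example of a DoH endpoint. */
#include <curl/curl.h>

static void enable_doh(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_URL, "https://www.example.com/");
  curl_easy_setopt(curl, CURLOPT_DOH_URL, "https://dns.example.net/dns-query");
  /* DoH sub-requests run as internal transfers on the same multi handle,
     which is why the cache-limit exception above matters */
}
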
@@ -3700,7 +3677,7 @@ static CURLcode create_conn(struct Curl_easy *data,
}
#if defined(USE_NTLM)
- /* If NTLM is requested in a part of this connection, make sure we don't
+ /* If NTLM is requested in a part of this connection, make sure we do not
assume the state is fine as this is a fresh connection and NTLM is
connection based. */
if((data->state.authhost.picked & CURLAUTH_NTLM) &&
@@ -3769,7 +3746,7 @@ CURLcode Curl_setup_conn(struct Curl_easy *data,
#ifndef CURL_DISABLE_PROXY
/* set proxy_connect_closed to false unconditionally already here since it
is used strictly to provide extra information to a parent function in the
- case of proxy CONNECT failures and we must make sure we don't have it
+ case of proxy CONNECT failures and we must make sure we do not have it
lingering set from a previous invoke */
conn->bits.proxy_connect_closed = FALSE;
#endif
@@ -3812,7 +3789,7 @@ CURLcode Curl_connect(struct Curl_easy *data,
/* multiplexed */
*protocol_done = TRUE;
else if(!*asyncp) {
- /* DNS resolution is done: that's either because this is a reused
+ /* DNS resolution is done: that is either because this is a reused
connection, in which case DNS was unnecessary, or because DNS
really did finish already (synch resolver/fast async resolve) */
result = Curl_setup_conn(data, protocol_done);
@@ -3823,7 +3800,7 @@ CURLcode Curl_connect(struct Curl_easy *data,
return result;
}
else if(result && conn) {
- /* We're not allowed to return failure with memory left allocated in the
+ /* We are not allowed to return failure with memory left allocated in the
connectdata struct, free those here */
Curl_detach_connection(data);
Curl_conncache_remove_conn(data, conn, TRUE);
@@ -3849,9 +3826,9 @@ CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
CURLcode result;
if(conn) {
- conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to
+ conn->bits.do_more = FALSE; /* by default there is no curl_do_more() to
use */
- /* if the protocol used doesn't support wildcards, switch it off */
+ /* if the protocol used does not support wildcards, switch it off */
if(data->state.wildcardmatch &&
!(conn->handler->flags & PROTOPT_WILDCARD))
data->state.wildcardmatch = FALSE;
diff --git a/libs/libcurl/src/url.h b/libs/libcurl/src/url.h
index b77f4d24f2..bd080dd35a 100644
--- a/libs/libcurl/src/url.h
+++ b/libs/libcurl/src/url.h
@@ -38,9 +38,10 @@ CURLcode Curl_uc_to_curlcode(CURLUcode uc);
CURLcode Curl_close(struct Curl_easy **datap); /* opposite of curl_open() */
CURLcode Curl_connect(struct Curl_easy *, bool *async, bool *protocol_connect);
void Curl_disconnect(struct Curl_easy *data,
- struct connectdata *, bool dead_connection);
+ struct connectdata *, bool aborted);
CURLcode Curl_setup_conn(struct Curl_easy *data,
bool *protocol_done);
+void Curl_conn_free(struct Curl_easy *data, struct connectdata *conn);
CURLcode Curl_parse_login_details(const char *login, const size_t len,
char **userptr, char **passwdptr,
char **optionsptr);
diff --git a/libs/libcurl/src/urlapi-int.h b/libs/libcurl/src/urlapi-int.h
index f28157732c..890e3285d9 100644
--- a/libs/libcurl/src/urlapi-int.h
+++ b/libs/libcurl/src/urlapi-int.h
@@ -30,9 +30,9 @@ size_t Curl_is_absolute_url(const char *url, char *buf, size_t buflen,
CURLUcode Curl_url_set_authority(CURLU *u, const char *authority);
-#ifdef DEBUGBUILD
-CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
- bool has_scheme);
+#ifdef UNITTESTS
+UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
+ bool has_scheme);
#endif
#endif /* HEADER_CURL_URLAPI_INT_H */
diff --git a/libs/libcurl/src/urlapi.c b/libs/libcurl/src/urlapi.c
index 7a938bbe9c..2179e106de 100644
--- a/libs/libcurl/src/urlapi.c
+++ b/libs/libcurl/src/urlapi.c
@@ -82,6 +82,7 @@ struct Curl_URL {
unsigned short portnum; /* the numerical version (if 'port' is set) */
BIT(query_present); /* to support blank */
BIT(fragment_present); /* to support blank */
+ BIT(guessed_scheme); /* when a URL without scheme is parsed */
};
#define DEFAULT_SCHEME "https"
@@ -101,7 +102,7 @@ static void free_urlhandle(struct Curl_URL *u)
}
/*
- * Find the separator at the end of the host name, or the '?' in cases like
+ * Find the separator at the end of the hostname, or the '?' in cases like
* http://www.example.com?id=2380
*/
static const char *find_host_sep(const char *url)
@@ -140,7 +141,7 @@ static const char hexdigits[] = "0123456789abcdef";
/* urlencode_str() writes data into an output dynbuf and URL-encodes the
* spaces in the source URL accordingly.
*
- * URL encoding should be skipped for host names, otherwise IDN resolution
+ * URL encoding should be skipped for hostnames, otherwise IDN resolution
* will fail.
*/
static CURLUcode urlencode_str(struct dynbuf *o, const char *url,
@@ -205,7 +206,7 @@ static CURLUcode urlencode_str(struct dynbuf *o, const char *url,
size_t Curl_is_absolute_url(const char *url, char *buf, size_t buflen,
bool guess_scheme)
{
- int i = 0;
+ size_t i = 0;
DEBUGASSERT(!buf || (buflen > MAX_SCHEME_LEN));
(void)buflen; /* only used in debug-builds */
if(buf)
@@ -229,7 +230,7 @@ size_t Curl_is_absolute_url(const char *url, char *buf, size_t buflen,
if(i && (url[i] == ':') && ((url[i + 1] == '/') || !guess_scheme)) {
/* If this does not guess scheme, the scheme always ends with the colon so
that this also detects data: URLs etc. In guessing mode, data: could
- be the host name "data" with a specified port number. */
+ be the hostname "data" with a specified port number. */
/* the length of the scheme is the name part only */
size_t len = i;
@@ -267,7 +268,7 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
bool skip_slash = FALSE;
*newurl = NULL;
- /* protsep points to the start of the host name */
+ /* protsep points to the start of the hostname */
protsep = strstr(base, "//");
if(!protsep)
protsep = base;
@@ -277,13 +278,13 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
if('/' != relurl[0]) {
int level = 0;
- /* First we need to find out if there's a ?-letter in the URL,
+ /* First we need to find out if there is a ?-letter in the URL,
and cut it and the right-side of that off */
pathsep = strchr(protsep, '?');
if(pathsep)
*pathsep = 0;
- /* we have a relative path to append to the last slash if there's one
+ /* we have a relative path to append to the last slash if there is one
available, or the new URL is just a query string (starts with a '?') or
a fragment (starts with '#') we append the new one at the end of the
current URL */
@@ -292,7 +293,7 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
if(pathsep)
*pathsep = 0;
- /* Check if there's any slash after the host name, and if so, remember
+ /* Check if there is any slash after the hostname, and if so, remember
that position instead */
pathsep = strchr(protsep, '/');
if(pathsep)
@@ -347,7 +348,7 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
if(pathsep) {
/* When people use badly formatted URLs, such as
"http://www.example.com?dir=/home/daniel" we must not use the first
- slash, if there's a ?-letter before it! */
+ slash, if there is a ?-letter before it! */
char *sep = strchr(protsep, '?');
if(sep && (sep < pathsep))
pathsep = sep;
@@ -355,8 +356,8 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
}
else {
/* There was no slash. Now, since we might be operating on a badly
- formatted URL, such as "http://www.example.com?id=2380" which
- doesn't use a slash separator as it is supposed to, we need to check
+ formatted URL, such as "http://www.example.com?id=2380" which does
+ not use a slash separator as it is supposed to, we need to check
for a ?-letter as well! */
pathsep = strchr(protsep, '?');
if(pathsep)
@@ -367,7 +368,7 @@ static CURLcode concat_url(char *base, const char *relurl, char **newurl)
Curl_dyn_init(&newest, CURL_MAX_INPUT_LENGTH);
- /* copy over the root url part */
+ /* copy over the root URL part */
result = Curl_dyn_add(&newest, base);
if(result)
return result;
@@ -420,15 +421,15 @@ static CURLUcode junkscan(const char *url, size_t *urllen, unsigned int flags)
/*
* parse_hostname_login()
*
- * Parse the login details (user name, password and options) from the URL and
- * strip them out of the host name
+ * Parse the login details (username, password and options) from the URL and
+ * strip them out of the hostname
*
*/
static CURLUcode parse_hostname_login(struct Curl_URL *u,
const char *login,
size_t len,
unsigned int flags,
- size_t *offset) /* to the host name */
+ size_t *offset) /* to the hostname */
{
CURLUcode result = CURLUE_OK;
CURLcode ccode;
@@ -475,7 +476,7 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u,
if(userp) {
if(flags & CURLU_DISALLOW_USER) {
- /* Option DISALLOW_USER is set and url contains username. */
+ /* Option DISALLOW_USER is set and URL contains username. */
result = CURLUE_USER_NOT_ALLOWED;
goto out;
}
@@ -493,7 +494,7 @@ static CURLUcode parse_hostname_login(struct Curl_URL *u,
u->options = optionsp;
}
- /* the host name starts at this offset */
+ /* the hostname starts at this offset */
*offset = ptr - login;
return CURLUE_OK;
@@ -538,11 +539,11 @@ UNITTEST CURLUcode Curl_parse_port(struct Curl_URL *u, struct dynbuf *host,
unsigned long port;
size_t keep = portptr - hostname;
- /* Browser behavior adaptation. If there's a colon with no digits after,
+ /* Browser behavior adaptation. If there is a colon with no digits after,
just cut off the name there which makes us ignore the colon and just
use the default port. Firefox, Chrome and Safari all do that.
- Don't do it if the URL has no scheme, to make something that looks like
+ Do not do it if the URL has no scheme, to make something that looks like
a scheme not work!
*/
Curl_dyn_setlen(host, keep);
@@ -591,7 +592,7 @@ static CURLUcode ipv6_parse(struct Curl_URL *u, char *hostname,
char zoneid[16];
int i = 0;
char *h = &hostname[len + 1];
- /* pass '25' if present and is a url encoded percent sign */
+ /* pass '25' if present and is a URL encoded percent sign */
if(!strncmp(h, "25", 2) && h[2] && (h[2] != ']'))
h += 2;
while(*h && (*h != ']') && (i < 15))
@@ -664,7 +665,6 @@ static CURLUcode hostname_check(struct Curl_URL *u, char *hostname,
*/
#define HOST_ERROR -1 /* out of memory */
-#define HOST_BAD -2 /* bad IPv4 address */
#define HOST_NAME 1
#define HOST_IPV4 2
@@ -686,7 +686,7 @@ static int ipv4_normalize(struct dynbuf *host)
char *endp = NULL;
unsigned long l;
if(!ISDIGIT(*c))
- /* most importantly this doesn't allow a leading plus or minus */
+ /* most importantly this does not allow a leading plus or minus */
return HOST_NAME;
l = strtoul(c, &endp, 0);
if(errno)
@@ -802,7 +802,7 @@ static CURLUcode parse_authority(struct Curl_URL *u,
CURLcode result;
/*
- * Parse the login details and strip them out of the host name.
+ * Parse the login details and strip them out of the hostname.
*/
uc = parse_hostname_login(u, auth, authlen, flags, &offset);
if(uc)
@@ -835,7 +835,6 @@ static CURLUcode parse_authority(struct Curl_URL *u,
case HOST_ERROR:
uc = CURLUE_OUT_OF_MEMORY;
break;
- case HOST_BAD:
default:
uc = CURLUE_BAD_HOSTNAME; /* Bad IPv4 address even */
break;
@@ -907,7 +906,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
do {
bool dotdot = TRUE;
if(*input == '.') {
- /* A. If the input buffer begins with a prefix of "../" or "./", then
+ /* A. If the input buffer begins with a prefix of "../" or "./", then
remove that prefix from the input buffer; otherwise, */
if(!strncmp("./", input, 2)) {
@@ -918,7 +917,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
input += 3;
clen -= 3;
}
- /* D. if the input buffer consists only of "." or "..", then remove
+ /* D. if the input buffer consists only of "." or "..", then remove
that from the input buffer; otherwise, */
else if(!strcmp(".", input) || !strcmp("..", input) ||
@@ -930,7 +929,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
dotdot = FALSE;
}
else if(*input == '/') {
- /* B. if the input buffer begins with a prefix of "/./" or "/.", where
+ /* B. if the input buffer begins with a prefix of "/./" or "/.", where
"." is a complete path segment, then replace that prefix with "/" in
the input buffer; otherwise, */
if(!strncmp("/./", input, 3)) {
@@ -943,7 +942,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
break;
}
- /* C. if the input buffer begins with a prefix of "/../" or "/..",
+ /* C. if the input buffer begins with a prefix of "/../" or "/..",
where ".." is a complete path segment, then replace that prefix with
"/" in the input buffer and remove the last segment and its
preceding "/" (if any) from the output buffer; otherwise, */
@@ -977,7 +976,7 @@ UNITTEST int dedotdotify(const char *input, size_t clen, char **outp)
dotdot = FALSE;
if(!dotdot) {
- /* E. move the first path segment in the input buffer to the end of
+ /* E. move the first path segment in the input buffer to the end of
the output buffer, including the initial "/" character (if any) and
any subsequent characters up to, but not including, the next "/"
character or the end of the input buffer. */
@@ -1070,7 +1069,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
* Appendix E, but believe me, it was meant to be there. --MK)
*/
if(ptr[0] != '/' && !STARTS_WITH_URL_DRIVE_PREFIX(ptr)) {
- /* the URL includes a host name, it must match "localhost" or
+ /* the URL includes a hostname, it must match "localhost" or
"127.0.0.1" to be valid */
if(checkprefix("localhost/", ptr) ||
checkprefix("127.0.0.1/", ptr)) {
@@ -1080,9 +1079,9 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
#if defined(_WIN32)
size_t len;
- /* the host name, NetBIOS computer name, can not contain disallowed
+ /* the hostname, NetBIOS computer name, can not contain disallowed
chars, and the delimiting slash character must be appended to the
- host name */
+ hostname */
path = strpbrk(ptr, "/\\:*?\"<>|");
if(!path || *path != '/') {
result = CURLUE_BAD_FILE_URL;
@@ -1118,7 +1117,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
Curl_dyn_reset(&host);
#if !defined(_WIN32) && !defined(MSDOS) && !defined(__CYGWIN__)
- /* Don't allow Windows drive letters when not in Windows.
+ /* Do not allow Windows drive letters when not in Windows.
* This catches both "file:/c:" and "file:c:" */
if(('/' == path[0] && STARTS_WITH_URL_DRIVE_PREFIX(&path[1])) ||
STARTS_WITH_URL_DRIVE_PREFIX(path)) {
@@ -1162,7 +1161,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
result = CURLUE_BAD_SLASHES;
goto fail;
}
- hostp = p; /* host name starts here */
+ hostp = p; /* hostname starts here */
}
else {
/* no scheme! */
@@ -1188,7 +1187,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
}
}
- /* find the end of the host name + port number */
+ /* find the end of the hostname + port number */
hostlen = strcspn(hostp, "/?#");
path = &hostp[hostlen];
@@ -1202,7 +1201,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
if((flags & CURLU_GUESS_SCHEME) && !schemep) {
const char *hostname = Curl_dyn_ptr(&host);
- /* legacy curl-style guess based on host name */
+ /* legacy curl-style guess based on hostname */
if(checkprefix("ftp.", hostname))
schemep = "ftp";
else if(checkprefix("dict.", hostname))
@@ -1223,6 +1222,7 @@ static CURLUcode parseurl(const char *url, CURLU *u, unsigned int flags)
result = CURLUE_OUT_OF_MEMORY;
goto fail;
}
+ u->guessed_scheme = TRUE;
}
}
else if(flags & CURLU_NO_AUTHORITY) {
@@ -1437,6 +1437,8 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what,
ptr = u->scheme;
ifmissing = CURLUE_NO_SCHEME;
urldecode = FALSE; /* never for schemes */
+ if((flags & CURLU_NO_GUESS_SCHEME) && u->guessed_scheme)
+ return CURLUE_NO_SCHEME;
break;
case CURLUPART_USER:
ptr = u->user;
@@ -1465,7 +1467,7 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what,
ifmissing = CURLUE_NO_PORT;
urldecode = FALSE; /* never for port */
if(!ptr && (flags & CURLU_DEFAULT_PORT) && u->scheme) {
- /* there's no stored port number, but asked to deliver
+ /* there is no stored port number, but asked to deliver
a default one for the scheme */
const struct Curl_handler *h = Curl_get_scheme_handler(u->scheme);
if(h) {
@@ -1525,6 +1527,7 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what,
return CURLUE_NO_HOST;
else {
const struct Curl_handler *h = NULL;
+ char schemebuf[MAX_SCHEME_LEN + 5];
if(u->scheme)
scheme = u->scheme;
else if(flags & CURLU_DEFAULT_SCHEME)
@@ -1534,7 +1537,7 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what,
h = Curl_get_scheme_handler(scheme);
if(!port && (flags & CURLU_DEFAULT_PORT)) {
- /* there's no stored port number, but asked to deliver
+ /* there is no stored port number, but asked to deliver
a default one for the scheme */
if(h) {
msnprintf(portbuf, sizeof(portbuf), "%u", h->defport);
@@ -1595,8 +1598,13 @@ CURLUcode curl_url_get(const CURLU *u, CURLUPart what,
}
}
- url = aprintf("%s://%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
- scheme,
+ if(!(flags & CURLU_NO_GUESS_SCHEME) || !u->guessed_scheme)
+ msnprintf(schemebuf, sizeof(schemebuf), "%s://", scheme);
+ else
+ schemebuf[0] = 0;
+
+ url = aprintf("%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ schemebuf,
u->user ? u->user : "",
u->password ? ":": "",
u->password ? u->password : "",
@@ -1718,6 +1726,7 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what,
break;
case CURLUPART_SCHEME:
storep = &u->scheme;
+ u->guessed_scheme = FALSE;
break;
case CURLUPART_USER:
storep = &u->user;
@@ -1790,6 +1799,7 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what,
}
else
return CURLUE_BAD_SCHEME;
+ u->guessed_scheme = FALSE;
break;
}
case CURLUPART_USER:
@@ -1862,7 +1872,7 @@ CURLUcode curl_url_set(CURLU *u, CURLUPart what,
return CURLUE_MALFORMED_INPUT;
/* if the new thing is absolute or the old one is not
- * (we could not get an absolute url in 'oldurl'),
+ * (we could not get an absolute URL in 'oldurl'),
* then replace the existing with the new. */
if(Curl_is_absolute_url(part, NULL, 0,
flags & (CURLU_GUESS_SCHEME|
@@ -1978,7 +1988,7 @@ nomem:
else if(what == CURLUPART_HOST) {
size_t n = Curl_dyn_len(&enc);
if(!n && (flags & CURLU_NO_AUTHORITY)) {
- /* Skip hostname check, it's allowed to be empty. */
+ /* Skip hostname check, it is allowed to be empty. */
}
else {
if(!n || hostname_check(u, (char *)newp, n)) {
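
The guessed_scheme bit threaded through this file makes a guessed scheme distinguishable from an explicit one: with CURLU_NO_GUESS_SCHEME, curl_url_get() reports CURLUE_NO_SCHEME for a guessed scheme and omits it when composing CURLUPART_URL. A small sketch of the observable behavior:

/* Sketch of the guessed-scheme behavior added in this file: parse a
 * schemeless URL with CURLU_GUESS_SCHEME, then ask for the scheme back
 * with CURLU_NO_GUESS_SCHEME. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURLU *u = curl_url();
  char *scheme = NULL;
  CURLUcode uc;

  if(!u)
    return 1;
  curl_url_set(u, CURLUPART_URL, "example.com/path", CURLU_GUESS_SCHEME);

  uc = curl_url_get(u, CURLUPART_SCHEME, &scheme, CURLU_NO_GUESS_SCHEME);
  if(uc == CURLUE_NO_SCHEME)
    puts("scheme was only guessed, not given in the input");
  else if(!uc) {
    printf("explicit scheme: %s\n", scheme);
    curl_free(scheme);
  }
  curl_url_cleanup(u);
  return 0;
}
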
diff --git a/libs/libcurl/src/urldata.h b/libs/libcurl/src/urldata.h
index 1e606dde5b..5a5ef0663b 100644
--- a/libs/libcurl/src/urldata.h
+++ b/libs/libcurl/src/urldata.h
@@ -67,7 +67,7 @@ struct curl_trc_featt;
#ifdef USE_WEBSOCKETS
/* CURLPROTO_GOPHERS (29) is the highest publicly used protocol bit number,
* the rest are internal information. If we use higher bits we only do this on
- * platforms that have a >= 64 bit type and then we use such a type for the
+ * platforms that have a >= 64-bit type and then we use such a type for the
* protocol fields in the protocol handler.
*/
#define CURLPROTO_WS (1<<30)
@@ -257,22 +257,6 @@ enum protection_level {
};
#endif
-/* enum for the nonblocking SSL connection state machine */
-typedef enum {
- ssl_connect_1,
- ssl_connect_2,
- ssl_connect_2_reading,
- ssl_connect_2_writing,
- ssl_connect_3,
- ssl_connect_done
-} ssl_connect_state;
-
-typedef enum {
- ssl_connection_none,
- ssl_connection_negotiating,
- ssl_connection_complete
-} ssl_connection_state;
-
/* SSL backend-specific data; declared differently by each SSL backend */
struct ssl_backend_data;
@@ -292,7 +276,7 @@ struct ssl_peer {
};
struct ssl_primary_config {
- char *CApath; /* certificate dir (doesn't work on windows) */
+ char *CApath; /* certificate dir (does not work on windows) */
char *CAfile; /* certificate to verify peer against */
char *issuercert; /* optional issuer certificate filename */
char *clientcert;
@@ -314,7 +298,7 @@ struct ssl_primary_config {
BIT(verifypeer); /* set TRUE if this is desired */
BIT(verifyhost); /* set TRUE if CN/SAN must match hostname */
BIT(verifystatus); /* set TRUE if certificate status must be checked */
- BIT(sessionid); /* cache session IDs or not */
+ BIT(cache_session); /* cache session or not */
};
struct ssl_config_data {
@@ -323,7 +307,7 @@ struct ssl_config_data {
curl_ssl_ctx_callback fsslctx; /* function to initialize ssl ctx */
void *fsslctxp; /* parameter for call back */
char *cert_type; /* format for certificate (default: PEM)*/
- char *key; /* private key file name */
+ char *key; /* private key filename */
struct curl_blob *key_blob;
char *key_type; /* format for private key (default: PEM) */
char *key_passwd; /* plain text private key password */
@@ -331,7 +315,7 @@ struct ssl_config_data {
BIT(falsestart);
BIT(enable_beast); /* allow this flaw for interoperability's sake */
BIT(no_revoke); /* disable SSL certificate revocation checks */
- BIT(no_partialchain); /* don't accept partial certificate chains */
+ BIT(no_partialchain); /* do not accept partial certificate chains */
BIT(revoke_best_effort); /* ignore SSL revocation offline/missing revocation
list errors */
BIT(native_ca_store); /* use the native ca store of operating system */
@@ -348,8 +332,8 @@ typedef void Curl_ssl_sessionid_dtor(void *sessionid, size_t idsize);
/* information stored about one single SSL session */
struct Curl_ssl_session {
- char *name; /* host name for which this ID was used */
- char *conn_to_host; /* host name for the connection (may be NULL) */
+ char *name; /* hostname for which this ID was used */
+ char *conn_to_host; /* hostname for the connection (may be NULL) */
const char *scheme; /* protocol scheme used */
void *sessionid; /* as returned from the SSL layer */
size_t idsize; /* if known, otherwise 0 */
@@ -535,10 +519,10 @@ struct ConnectBits {
re-attempted at another connection. */
#ifndef CURL_DISABLE_FTP
BIT(ftp_use_epsv); /* As set with CURLOPT_FTP_USE_EPSV, but if we find out
- EPSV doesn't work we disable it for the forthcoming
+ EPSV does not work we disable it for the forthcoming
requests */
BIT(ftp_use_eprt); /* As set with CURLOPT_FTP_USE_EPRT, but if we find out
- EPRT doesn't work we disable it for the forthcoming
+ EPRT does not work we disable it for the forthcoming
requests */
BIT(ftp_use_data_ssl); /* Enabled SSL for the data connection */
BIT(ftp_use_control_ssl); /* Enabled SSL for the control connection */
@@ -562,6 +546,9 @@ struct ConnectBits {
accept() */
BIT(parallel_connect); /* set TRUE when a parallel connect attempt has
started (happy eyeballs) */
+ BIT(aborted); /* connection was aborted, e.g. in unclean state */
+ BIT(shutdown_handler); /* connection shutdown: handler shut down */
+ BIT(shutdown_filters); /* connection shutdown: filters shut down */
};
struct hostname {
@@ -676,7 +663,7 @@ struct Curl_handler {
/* This function *MAY* be set to a protocol-dependent function that is run
* after the connect() and everything is done, as a step in the connection.
* The 'done' pointer points to a bool that should be set to TRUE if the
- * function completes before return. If it doesn't complete, the caller
+ * function completes before return. If it does not complete, the caller
* should call the ->connecting() function until it is.
*/
CURLcode (*connect_it)(struct Curl_easy *data, bool *done);
@@ -707,7 +694,7 @@ struct Curl_handler {
struct connectdata *conn, curl_socket_t *socks);
/* This function *MAY* be set to a protocol-dependent function that is run
- * by the curl_disconnect(), as a step in the disconnection. If the handler
+ * by the curl_disconnect(), as a step in the disconnection. If the handler
* is called because the connection has been considered dead,
* dead_connection is set to TRUE. The connection is (again) associated with
* the transfer here.
@@ -755,11 +742,11 @@ struct Curl_handler {
the send function might need to be called while uploading, or vice versa.
*/
#define PROTOPT_DIRLOCK (1<<3)
-#define PROTOPT_NONETWORK (1<<4) /* protocol doesn't use the network! */
+#define PROTOPT_NONETWORK (1<<4) /* protocol does not use the network! */
#define PROTOPT_NEEDSPWD (1<<5) /* needs a password, and if none is set it
gets a default */
-#define PROTOPT_NOURLQUERY (1<<6) /* protocol can't handle
- url query strings (?foo=bar) ! */
+#define PROTOPT_NOURLQUERY (1<<6) /* protocol cannot handle
+ URL query strings (?foo=bar) ! */
#define PROTOPT_CREDSPERREQUEST (1<<7) /* requires login credentials per
request instead of per connection */
#define PROTOPT_ALPN (1<<8) /* set ALPN for this */
@@ -771,8 +758,8 @@ struct Curl_handler {
this protocol and act as a gateway */
#define PROTOPT_WILDCARD (1<<12) /* protocol supports wildcard matching */
#define PROTOPT_USERPWDCTRL (1<<13) /* Allow "control bytes" (< 32 ascii) in
- user name and password */
-#define PROTOPT_NOTCPPROXY (1<<14) /* this protocol can't proxy over TCP */
+ username and password */
+#define PROTOPT_NOTCPPROXY (1<<14) /* this protocol cannot proxy over TCP */
#define CONNCHECK_NONE 0 /* No checks */
#define CONNCHECK_ISDEAD (1<<0) /* Check if the connection is dead. */
@@ -793,7 +780,7 @@ struct proxy_info {
int port;
unsigned char proxytype; /* curl_proxytype: what kind of proxy that is in
use */
- char *user; /* proxy user name string, allocated */
+ char *user; /* proxy username string, allocated */
char *passwd; /* proxy password string, allocated */
};
@@ -835,8 +822,8 @@ struct connectdata {
const struct Curl_sockaddr_ex *remote_addr;
struct hostname host;
- char *hostname_resolve; /* host name to resolve to address, allocated */
- char *secondaryhostname; /* secondary socket host name (ftp) */
+ char *hostname_resolve; /* hostname to resolve to address, allocated */
+ char *secondaryhostname; /* secondary socket hostname (ftp) */
struct hostname conn_to_host; /* the host to connect to. valid only if
bits.conn_to_host is set */
#ifndef CURL_DISABLE_PROXY
@@ -850,7 +837,7 @@ struct connectdata {
these are updated with data which comes directly from the socket. */
struct ip_quadruple primary;
struct ip_quadruple secondary;
- char *user; /* user name string, allocated */
+ char *user; /* username string, allocated */
char *passwd; /* password string, allocated */
char *options; /* options string, allocated */
char *sasl_authzid; /* authorization identity string, allocated */
@@ -863,6 +850,13 @@ struct connectdata {
Curl_recv *recv[2];
Curl_send *send[2];
struct Curl_cfilter *cfilter[2]; /* connection filters */
+ struct {
+ struct curltime start[2]; /* when filter shutdown started */
+ unsigned int timeout_ms; /* 0 means no timeout */
+ } shutdown;
+ /* Last pollset used in connection shutdown. Used to detect changes
+ * for multi_socket API. */
+ struct easy_pollset shutdown_poll;
struct ssl_primary_config ssl_config;
#ifndef CURL_DISABLE_PROXY
@@ -908,11 +902,6 @@ struct connectdata {
CtxtHandle *sslContext;
#endif
-#if defined(_WIN32) && defined(USE_WINSOCK)
- struct curltime last_sndbuf_update; /* last time readwrite_upload called
- win_update_buffer_size */
-#endif
-
#ifdef USE_GSASL
struct gsasldata gsasl;
#endif
@@ -986,7 +975,7 @@ struct connectdata {
/* When this connection is created, store the conditions for the local end
bind. This is stored before the actual bind and before any connection is
made and will serve the purpose of being used for comparison reasons so
- that subsequent bound-requested connections aren't accidentally reusing
+ that subsequent bound-requested connections are not accidentally reusing
wrong connections. */
char *localdev;
unsigned short localportrange;
@@ -1045,7 +1034,7 @@ struct PureInfo {
unsigned long httpauthavail; /* what host auth types were announced */
long numconnects; /* how many new connection did libcurl created */
char *contenttype; /* the content type of the object */
- char *wouldredirect; /* URL this would've been redirected to if asked to */
+ char *wouldredirect; /* URL this would have been redirected to if asked to */
curl_off_t retry_after; /* info from Retry-After: header */
unsigned int header_size; /* size of read header(s) in bytes */
@@ -1064,7 +1053,7 @@ struct PureInfo {
struct curl_certinfo certs; /* info about the certs. Asked for with
CURLOPT_CERTINFO / CURLINFO_CERTINFO */
CURLproxycode pxcode;
- BIT(timecond); /* set to TRUE if the time condition didn't match, which
+ BIT(timecond); /* set to TRUE if the time condition did not match, which
thus made the document NOT get fetched */
BIT(used_proxy); /* the transfer used a proxy */
};
@@ -1237,8 +1226,8 @@ struct UrlState {
curl_off_t current_speed; /* the ProgressShow() function sets this,
bytes / second */
- /* host name, port number and protocol of the first (not followed) request.
- if set, this should be the host name that we will sent authorization to,
+ /* hostname, port number and protocol of the first (not followed) request.
+ if set, this should be the hostname that we will sent authorization to,
no else. Used to make Location: following not keep sending user+password.
This is strdup()ed data. */
char *first_host;
@@ -1375,7 +1364,7 @@ struct UrlState {
unsigned char select_bits; /* != 0 -> bitmask of socket events for this
transfer overriding anything the socket may
report */
-#ifdef CURLDEBUG
+#ifdef DEBUGBUILD
BIT(conncache_lock);
#endif
/* when curl_easy_perform() is called, the multi handle is "owned" by
@@ -1390,7 +1379,7 @@ struct UrlState {
called. */
BIT(allow_port); /* Is set.use_port allowed to take effect or not. This
is always set TRUE when curl_easy_perform() is called. */
- BIT(authproblem); /* TRUE if there's some problem authenticating */
+ BIT(authproblem); /* TRUE if there is some problem authenticating */
/* set after initial USER failure, to prevent an authentication loop */
BIT(wildcardmatch); /* enable wildcard matching */
BIT(disableexpect); /* TRUE if Expect: is disabled due to a previous
@@ -1428,12 +1417,12 @@ struct UrlState {
struct Curl_multi; /* declared in multihandle.c */
enum dupstring {
- STRING_CERT, /* client certificate file name */
+ STRING_CERT, /* client certificate filename */
STRING_CERT_TYPE, /* format for certificate (default: PEM)*/
- STRING_KEY, /* private key file name */
+ STRING_KEY, /* private key filename */
STRING_KEY_PASSWD, /* plain text private key password */
STRING_KEY_TYPE, /* format for private key (default: PEM) */
- STRING_SSL_CAPATH, /* CA directory name (doesn't work on windows) */
+ STRING_SSL_CAPATH, /* CA directory name (does not work on windows) */
STRING_SSL_CAFILE, /* certificate file to verify peer against */
STRING_SSL_PINNEDPUBLICKEY, /* public key file to verify peer against */
STRING_SSL_CIPHER_LIST, /* list of ciphers to use */
@@ -1442,12 +1431,12 @@ enum dupstring {
STRING_SSL_ISSUERCERT, /* issuer cert file to check certificate */
STRING_SERVICE_NAME, /* Service name */
#ifndef CURL_DISABLE_PROXY
- STRING_CERT_PROXY, /* client certificate file name */
+ STRING_CERT_PROXY, /* client certificate filename */
STRING_CERT_TYPE_PROXY, /* format for certificate (default: PEM)*/
- STRING_KEY_PROXY, /* private key file name */
+ STRING_KEY_PROXY, /* private key filename */
STRING_KEY_PASSWD_PROXY, /* plain text private key password */
STRING_KEY_TYPE_PROXY, /* format for private key (default: PEM) */
- STRING_SSL_CAPATH_PROXY, /* CA directory name (doesn't work on windows) */
+ STRING_SSL_CAPATH_PROXY, /* CA directory name (does not work on windows) */
STRING_SSL_CAFILE_PROXY, /* certificate file to verify peer against */
STRING_SSL_PINNEDPUBLICKEY_PROXY, /* public key file to verify proxy */
STRING_SSL_CIPHER_LIST_PROXY, /* list of ciphers to use */
@@ -1461,8 +1450,10 @@ enum dupstring {
STRING_COOKIEJAR, /* dump all cookies to this file */
#endif
STRING_CUSTOMREQUEST, /* HTTP/FTP/RTSP request/method to use */
- STRING_DEFAULT_PROTOCOL, /* Protocol to use when the URL doesn't specify */
+ STRING_DEFAULT_PROTOCOL, /* Protocol to use when the URL does not specify */
STRING_DEVICE, /* local network interface/address to use */
+ STRING_INTERFACE, /* local network interface to use */
+ STRING_BINDHOST, /* local address to use */
STRING_ENCODING, /* Accept-Encoding string */
#ifndef CURL_DISABLE_FTP
STRING_FTP_ACCOUNT, /* ftp account data */
@@ -1504,7 +1495,7 @@ enum dupstring {
STRING_SSH_PUBLIC_KEY, /* path to the public key file for auth */
STRING_SSH_HOST_PUBLIC_KEY_MD5, /* md5 of host public key in ascii hex */
STRING_SSH_HOST_PUBLIC_KEY_SHA256, /* sha256 of host public key in base64 */
- STRING_SSH_KNOWNHOSTS, /* file name of knownhosts file */
+ STRING_SSH_KNOWNHOSTS, /* filename of knownhosts file */
#endif
#ifndef CURL_DISABLE_SMTP
STRING_MAIL_FROM,
@@ -1575,7 +1566,7 @@ enum dupblob {
};
/* callback that gets called when this easy handle is completed within a multi
- handle. Only used for internally created transfers, like for example
+ handle. Only used for internally created transfers, like for example
DoH. */
typedef int (*multidone_func)(struct Curl_easy *easy, CURLcode result);
@@ -1600,7 +1591,7 @@ struct UserDefined {
#ifndef CURL_DISABLE_BINDLOCAL
unsigned short localport; /* local port number to bind to */
unsigned short localportrange; /* number of additional port numbers to test
- in case the 'localport' one can't be
+ in case the 'localport' one cannot be
bind()ed */
#endif
curl_write_callback fwrite_func; /* function that stores the output */
@@ -1633,9 +1624,10 @@ struct UserDefined {
void *progress_client; /* pointer to pass to the progress callback */
void *ioctl_client; /* pointer to pass to the ioctl callback */
unsigned int timeout; /* ms, 0 means no timeout */
- unsigned int connecttimeout; /* ms, 0 means no timeout */
+ unsigned int connecttimeout; /* ms, 0 means default timeout */
unsigned int happy_eyeballs_timeout; /* ms, 0 is a valid value */
unsigned int server_response_timeout; /* ms, 0 means no timeout */
+ unsigned int shutdowntimeout; /* ms, 0 means default timeout */
long maxage_conn; /* in seconds, max idle time to allow a connection that
is to be reused */
long maxlifetime_conn; /* in seconds, max time since creation to allow a
@@ -1700,7 +1692,7 @@ struct UserDefined {
struct curl_slist *postquote; /* after the transfer */
struct curl_slist *prequote; /* before the transfer, after type */
/* Despite the name, ftp_create_missing_dirs is for FTP(S) and SFTP
- 1 - create directories that don't exist
+ 1 - create directories that do not exist
2 - the same but also allow MKD to fail once
*/
unsigned char ftp_create_missing_dirs;
@@ -1747,6 +1739,7 @@ struct UserDefined {
int tcp_keepidle; /* seconds in idle before sending keepalive probe */
int tcp_keepintvl; /* seconds between TCP keepalive probes */
+ int tcp_keepcnt; /* maximum number of keepalive probes */
long expect_100_timeout; /* in milliseconds */
#if defined(USE_HTTP2) || defined(USE_HTTP3)
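
The new tcp_keepcnt field above stores the maximum number of TCP keepalive probes before a connection is considered dead. A hedged sketch of the corresponding easy options, assuming the public name CURLOPT_TCP_KEEPCNT introduced alongside the existing keepalive options (the values are examples):

/* Sketch of TCP keepalive tuning; CURLOPT_TCP_KEEPCNT is assumed to be the
 * public option backing the new tcp_keepcnt field. */
#include <curl/curl.h>

static void tune_keepalive(CURL *curl)
{
  curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);  /* enable SO_KEEPALIVE     */
  curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 60L);  /* idle secs before probes */
  curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 10L); /* secs between probes     */
  curl_easy_setopt(curl, CURLOPT_TCP_KEEPCNT, 5L);    /* probes before giving up */
}
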
@@ -1795,10 +1788,10 @@ struct UserDefined {
/* Here follows boolean settings that define how to behave during
this session. They are STATIC, set by libcurl users or at least initially
- and they don't change during operations. */
+ and they do not change during operations. */
BIT(quick_exit); /* set 1L when it is okay to leak things (like
- threads), as we're about to exit() anyway and
- don't want lengthy cleanups to delay termination,
+ threads), as we are about to exit() anyway and
+ do not want lengthy cleanups to delay termination,
e.g. after a DNS timeout */
BIT(get_filetime); /* get the time and get of the remote file */
#ifndef CURL_DISABLE_PROXY
@@ -1818,7 +1811,7 @@ struct UserDefined {
us */
BIT(wildcard_enabled); /* enable wildcard matching */
#endif
- BIT(hide_progress); /* don't use the progress meter */
+ BIT(hide_progress); /* do not use the progress meter */
BIT(http_fail_on_error); /* fail on HTTP error codes >= 400 */
BIT(http_keep_sending_on_error); /* for HTTP status codes >= 300 */
BIT(http_follow_location); /* follow HTTP redirects */
@@ -1864,7 +1857,7 @@ struct UserDefined {
#ifdef USE_UNIX_SOCKETS
BIT(abstract_unix_socket);
#endif
- BIT(disallow_username_in_url); /* disallow username in url */
+ BIT(disallow_username_in_url); /* disallow username in URL */
#ifndef CURL_DISABLE_DOH
BIT(doh); /* DNS-over-HTTPS enabled */
BIT(doh_verifypeer); /* DoH certificate peer verification */
diff --git a/libs/libcurl/src/vauth/cleartext.c b/libs/libcurl/src/vauth/cleartext.c
index 015df2c33b..198a452962 100644
--- a/libs/libcurl/src/vauth/cleartext.c
+++ b/libs/libcurl/src/vauth/cleartext.c
@@ -100,11 +100,11 @@ CURLcode Curl_auth_create_plain_message(const char *authzid,
* Curl_auth_create_login_message()
*
* This is used to generate an already encoded LOGIN message containing the
- * user name or password ready for sending to the recipient.
+ * username or password ready for sending to the recipient.
*
* Parameters:
*
- * valuep [in] - The user name or user's password.
+ * valuep [in] - The username or user's password.
* out [out] - The result storage.
*
* Returns void.
@@ -118,11 +118,11 @@ void Curl_auth_create_login_message(const char *valuep, struct bufref *out)
* Curl_auth_create_external_message()
*
* This is used to generate an already encoded EXTERNAL message containing
- * the user name ready for sending to the recipient.
+ * the username ready for sending to the recipient.
*
* Parameters:
*
- * user [in] - The user name.
+ * user [in] - The username.
* out [out] - The result storage.
*
* Returns void.
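
For context, Curl_auth_create_plain_message() builds the RFC 4616 PLAIN layout, authzid NUL authcid NUL password, which the caller then base64-encodes, while the LOGIN helper above encodes the username and password separately. A simplified standalone sketch of the PLAIN layout, not libcurl's internal code:

/* Simplified sketch of the RFC 4616 PLAIN message layout (before base64
 * encoding); this is not libcurl's internal implementation. */
#include <stdlib.h>
#include <string.h>

static unsigned char *plain_message(const char *authzid, const char *user,
                                    const char *passwd, size_t *outlen)
{
  size_t zlen = authzid ? strlen(authzid) : 0;
  size_t ulen = strlen(user);
  size_t plen = strlen(passwd);
  unsigned char *msg = malloc(zlen + ulen + plen + 2);
  if(!msg)
    return NULL;
  memcpy(msg, authzid ? authzid : "", zlen);
  msg[zlen] = '\0';                        /* NUL separator */
  memcpy(msg + zlen + 1, user, ulen);
  msg[zlen + 1 + ulen] = '\0';             /* NUL separator */
  memcpy(msg + zlen + 2 + ulen, passwd, plen);
  *outlen = zlen + ulen + plen + 2;
  return msg;
}
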
diff --git a/libs/libcurl/src/vauth/cram.c b/libs/libcurl/src/vauth/cram.c
index c753cc3b51..f8952abbb3 100644
--- a/libs/libcurl/src/vauth/cram.c
+++ b/libs/libcurl/src/vauth/cram.c
@@ -51,7 +51,7 @@
* Parameters:
*
* chlg [in] - The challenge.
- * userp [in] - The user name.
+ * userp [in] - The username.
* passwdp [in] - The user's password.
* out [out] - The result storage.
*
diff --git a/libs/libcurl/src/vauth/digest.c b/libs/libcurl/src/vauth/digest.c
index 59ab4e842e..5e1461fff4 100644
--- a/libs/libcurl/src/vauth/digest.c
+++ b/libs/libcurl/src/vauth/digest.c
@@ -103,7 +103,7 @@ bool Curl_auth_digest_get_pair(const char *str, char *value, char *content,
case ',':
if(!starts_with_quote) {
- /* This signals the end of the content if we didn't get a starting
+ /* This signals the end of the content if we did not get a starting
quote and then we do "sloppy" parsing */
c = 0; /* the end */
continue;
@@ -326,7 +326,7 @@ bool Curl_auth_is_digest_supported(void)
*
* data [in] - The session handle.
* chlg [in] - The challenge message.
- * userp [in] - The user name.
+ * userp [in] - The username.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
* out [out] - The result storage.
@@ -629,7 +629,7 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg,
}
}
else
- break; /* We're done here */
+ break; /* We are done here */
/* Pass all additional spaces here */
while(*chlg && ISBLANK(*chlg))
@@ -646,7 +646,7 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg,
if(before && !digest->stale)
return CURLE_BAD_CONTENT_ENCODING;
- /* We got this header without a nonce, that's a bad Digest line! */
+ /* We got this header without a nonce, that is a bad Digest line! */
if(!digest->nonce)
return CURLE_BAD_CONTENT_ENCODING;
@@ -666,7 +666,7 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg,
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name.
+ * userp [in] - The username.
* passwdp [in] - The user's password.
* request [in] - The HTTP request.
* uripath [in] - The path of the HTTP uri.
@@ -788,7 +788,7 @@ static CURLcode auth_create_digest_http_message(
return CURLE_OUT_OF_MEMORY;
if(digest->qop && strcasecompare(digest->qop, "auth-int")) {
- /* We don't support auth-int for PUT or POST */
+ /* We do not support auth-int for PUT or POST */
char hashed[65];
char *hashthis2;
@@ -835,12 +835,12 @@ static CURLcode auth_create_digest_http_message(
Authorization: Digest username="testuser", realm="testrealm", \
nonce="1053604145", uri="/64", response="c55f7f30d83d774a3d2dcacf725abaca"
- Digest parameters are all quoted strings. Username which is provided by
+ Digest parameters are all quoted strings. Username which is provided by
the user will need double quotes and backslashes within it escaped.
realm, nonce, and opaque will need backslashes as well as they were
- de-escaped when copied from request header. cnonce is generated with
- web-safe characters. uri is already percent encoded. nc is 8 hex
- characters. algorithm and qop with standard values only contain web-safe
+ de-escaped when copied from request header. cnonce is generated with
+ web-safe characters. uri is already percent encoded. nc is 8 hex
+ characters. algorithm and qop with standard values only contain web-safe
characters.
*/
userp_quoted = auth_digest_string_quoted(digest->userhash ? userh : userp);
@@ -957,7 +957,7 @@ static CURLcode auth_create_digest_http_message(
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name.
+ * userp [in] - The username.
* passwdp [in] - The user's password.
* request [in] - The HTTP request.
* uripath [in] - The path of the HTTP uri.
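
The reflowed comment above notes that the username (and realm, nonce, opaque) must have double quotes and backslashes escaped before being emitted as quoted strings. A simplified sketch of that escaping rule, not curl's auth_digest_string_quoted():

/* Simplified sketch of quoting a Digest parameter value as described
 * above: backslash-escape '\' and '"'; not curl's actual helper. */
#include <stdlib.h>
#include <string.h>

static char *digest_quote(const char *source)
{
  size_t n = 0;
  const char *s;
  char *dest, *d;

  for(s = source; *s; s++)                 /* count output length */
    n += (*s == '"' || *s == '\\') ? 2 : 1;

  dest = malloc(n + 1);
  if(!dest)
    return NULL;

  for(s = source, d = dest; *s; s++) {
    if(*s == '"' || *s == '\\')
      *d++ = '\\';
    *d++ = *s;
  }
  *d = '\0';
  return dest;
}
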
diff --git a/libs/libcurl/src/vauth/digest_sspi.c b/libs/libcurl/src/vauth/digest_sspi.c
index d61c8386bc..1562e124f4 100644
--- a/libs/libcurl/src/vauth/digest_sspi.c
+++ b/libs/libcurl/src/vauth/digest_sspi.c
@@ -81,7 +81,7 @@ bool Curl_auth_is_digest_supported(void)
*
* data [in] - The session handle.
* chlg [in] - The challenge message.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
* out [out] - The result storage.
@@ -122,7 +122,7 @@ CURLcode Curl_auth_create_digest_md5_message(struct Curl_easy *data,
status = s_pSecFn->QuerySecurityPackageInfo((TCHAR *) TEXT(SP_NAME_DIGEST),
&SecurityPackage);
if(status != SEC_E_OK) {
- failf(data, "SSPI: couldn't get auth info");
+ failf(data, "SSPI: could not get auth info");
return CURLE_AUTH_ERROR;
}
@@ -291,7 +291,7 @@ CURLcode Curl_override_sspi_http_realm(const char *chlg,
}
}
else
- break; /* We're done here */
+ break; /* We are done here */
/* Pass all additional spaces here */
while(*chlg && ISBLANK(*chlg))
@@ -324,8 +324,8 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg,
{
size_t chlglen = strlen(chlg);
- /* We had an input token before so if there's another one now that means we
- provided bad credentials in the previous request or it's stale. */
+ /* We had an input token before so if there is another one now that means we
+ provided bad credentials in the previous request or it is stale. */
if(digest->input_token) {
bool stale = false;
const char *p = chlg;
@@ -379,7 +379,7 @@ CURLcode Curl_auth_decode_digest_http_message(const char *chlg,
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* request [in] - The HTTP request.
* uripath [in] - The path of the HTTP uri.
@@ -413,7 +413,7 @@ CURLcode Curl_auth_create_digest_http_message(struct Curl_easy *data,
status = s_pSecFn->QuerySecurityPackageInfo((TCHAR *) TEXT(SP_NAME_DIGEST),
&SecurityPackage);
if(status != SEC_E_OK) {
- failf(data, "SSPI: couldn't get auth info");
+ failf(data, "SSPI: could not get auth info");
return CURLE_AUTH_ERROR;
}
diff --git a/libs/libcurl/src/vauth/krb5_gssapi.c b/libs/libcurl/src/vauth/krb5_gssapi.c
index 655f908300..b4d3a1e97b 100644
--- a/libs/libcurl/src/vauth/krb5_gssapi.c
+++ b/libs/libcurl/src/vauth/krb5_gssapi.c
@@ -65,10 +65,10 @@ bool Curl_auth_is_gssapi_supported(void)
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name.
+ * userp [in] - The username.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in[ - The host name.
+ * host [in[ - The hostname.
* mutual_auth [in] - Flag specifying whether or not mutual authentication
* is enabled.
* chlg [in] - Optional challenge message.
@@ -243,7 +243,7 @@ CURLcode Curl_auth_create_gssapi_security_message(struct Curl_easy *data,
/* Process the maximum message size the server can receive */
if(max_size > 0) {
/* The server has told us it supports a maximum receive buffer, however, as
- we don't require one unless we are encrypting data, we tell the server
+ we do not require one unless we are encrypting data, we tell the server
our receive buffer is zero. */
max_size = 0;
}
diff --git a/libs/libcurl/src/vauth/krb5_sspi.c b/libs/libcurl/src/vauth/krb5_sspi.c
index 78999232e3..d93cd6ef42 100644
--- a/libs/libcurl/src/vauth/krb5_sspi.c
+++ b/libs/libcurl/src/vauth/krb5_sspi.c
@@ -76,10 +76,10 @@ bool Curl_auth_is_gssapi_supported(void)
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* mutual_auth [in] - Flag specifying whether or not mutual authentication
* is enabled.
* chlg [in] - Optional challenge message.
@@ -122,7 +122,7 @@ CURLcode Curl_auth_create_gssapi_user_message(struct Curl_easy *data,
TEXT(SP_NAME_KERBEROS),
&SecurityPackage);
if(status != SEC_E_OK) {
- failf(data, "SSPI: couldn't get auth info");
+ failf(data, "SSPI: could not get auth info");
return CURLE_AUTH_ERROR;
}
@@ -335,7 +335,7 @@ CURLcode Curl_auth_create_gssapi_security_message(struct Curl_easy *data,
/* Process the maximum message size the server can receive */
if(max_size > 0) {
/* The server has told us it supports a maximum receive buffer, however, as
- we don't require one unless we are encrypting data, we tell the server
+ we do not require one unless we are encrypting data, we tell the server
our receive buffer is zero. */
max_size = 0;
}
diff --git a/libs/libcurl/src/vauth/ntlm.c b/libs/libcurl/src/vauth/ntlm.c
index a434497d42..99bd62470e 100644
--- a/libs/libcurl/src/vauth/ntlm.c
+++ b/libs/libcurl/src/vauth/ntlm.c
@@ -59,7 +59,7 @@
/* "NTLMSSP" signature is always in ASCII regardless of the platform */
#define NTLMSSP_SIGNATURE "\x4e\x54\x4c\x4d\x53\x53\x50"
-/* The fixed host name we provide, in order to not leak our real local host
+/* The fixed hostname we provide, in order to not leak our real local host
name. Copy the name used by Firefox. */
#define NTLM_HOSTNAME "WORKSTATION"
@@ -325,10 +325,10 @@ static void unicodecpy(unsigned char *dest, const char *src, size_t length)
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* ntlm [in/out] - The NTLM data struct being used and modified.
* out [out] - The result storage.
*
@@ -384,9 +384,9 @@ CURLcode Curl_auth_create_ntlm_type1_message(struct Curl_easy *data,
"%c%c" /* 2 zeroes */
"%c%c" /* host length */
"%c%c" /* host allocated space */
- "%c%c" /* host name offset */
+ "%c%c" /* hostname offset */
"%c%c" /* 2 zeroes */
- "%s" /* host name */
+ "%s" /* hostname */
"%s", /* domain string */
0, /* trailing zero */
0, 0, 0, /* part of type-1 long */
@@ -448,7 +448,7 @@ CURLcode Curl_auth_create_ntlm_type1_message(struct Curl_easy *data,
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* ntlm [in/out] - The NTLM data struct being used and modified.
* out [out] - The result storage.
@@ -470,7 +470,7 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
12 LM/LMv2 Response security buffer
20 NTLM/NTLMv2 Response security buffer
28 Target Name security buffer
- 36 User Name security buffer
+ 36 username security buffer
44 Workstation Name security buffer
(52) Session Key security buffer (*)
(60) Flags long (*)
@@ -482,9 +482,9 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
CURLcode result = CURLE_OK;
size_t size;
unsigned char ntlmbuf[NTLM_BUFSIZE];
- int lmrespoff;
+ unsigned int lmrespoff;
unsigned char lmresp[24]; /* fixed-size */
- int ntrespoff;
+ unsigned int ntrespoff;
unsigned int ntresplen = 24;
unsigned char ntresp[24]; /* fixed-size */
unsigned char *ptr_ntresp = &ntresp[0];
@@ -517,7 +517,7 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
userlen = strlen(user);
#ifndef NTLM_HOSTNAME
- /* Get the machine's un-qualified host name as NTLM doesn't like the fully
+ /* Get the machine's un-qualified hostname as NTLM does not like the fully
qualified domain name */
if(Curl_gethostname(host, sizeof(host))) {
infof(data, "gethostname() failed, continuing without");
@@ -585,7 +585,7 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
return result;
Curl_ntlm_core_lm_resp(lmbuffer, &ntlm->nonce[0], lmresp);
- ntlm->flags &= ~NTLMFLAG_NEGOTIATE_NTLM2_KEY;
+ ntlm->flags &= ~(unsigned int)NTLMFLAG_NEGOTIATE_NTLM2_KEY;
/* A safer but less compatible alternative is:
* Curl_ntlm_core_lm_resp(ntbuffer, &ntlm->nonce[0], lmresp);
@@ -722,7 +722,7 @@ CURLcode Curl_auth_create_ntlm_type3_message(struct Curl_easy *data,
/* Make sure that the domain, user and host strings fit in the
buffer before we copy them there. */
if(size + userlen + domlen + hostlen >= NTLM_BUFSIZE) {
- failf(data, "user + domain + host name too big");
+ failf(data, "user + domain + hostname too big");
return CURLE_OUT_OF_MEMORY;
}
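
The type-3 layout listed further up describes each field as an 8-byte NTLM "security buffer": a 2-byte length, a 2-byte allocated size and a 4-byte offset into the message, all little-endian. A minimal standalone sketch of emitting one such descriptor (the helper name and the example values are invented for illustration; this is not libcurl code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: write an NTLM "security buffer" descriptor
 * (length, allocated space, offset), all little-endian. */
static void put_secbuf(unsigned char *dst, uint16_t len, uint32_t offset)
{
  dst[0] = (unsigned char)(len & 0xff);          /* length, low byte */
  dst[1] = (unsigned char)(len >> 8);            /* length, high byte */
  dst[2] = dst[0];                               /* allocated space = length */
  dst[3] = dst[1];
  dst[4] = (unsigned char)(offset & 0xff);       /* offset into the message */
  dst[5] = (unsigned char)((offset >> 8) & 0xff);
  dst[6] = (unsigned char)((offset >> 16) & 0xff);
  dst[7] = (unsigned char)((offset >> 24) & 0xff);
}

int main(void)
{
  unsigned char buf[8];
  put_secbuf(buf, 24, 64);   /* e.g. the 24-byte LM response at offset 64 */
  for(int i = 0; i < 8; i++)
    printf("%02x ", buf[i]);
  printf("\n");              /* prints: 18 00 18 00 40 00 00 00 */
  return 0;
}
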
diff --git a/libs/libcurl/src/vauth/ntlm_sspi.c b/libs/libcurl/src/vauth/ntlm_sspi.c
index f8c3558603..2da0789d73 100644
--- a/libs/libcurl/src/vauth/ntlm_sspi.c
+++ b/libs/libcurl/src/vauth/ntlm_sspi.c
@@ -75,10 +75,10 @@ bool Curl_auth_is_ntlm_supported(void)
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* ntlm [in/out] - The NTLM data struct being used and modified.
* out [out] - The result storage.
*
@@ -106,7 +106,7 @@ CURLcode Curl_auth_create_ntlm_type1_message(struct Curl_easy *data,
status = s_pSecFn->QuerySecurityPackageInfo((TCHAR *) TEXT(SP_NAME_NTLM),
&SecurityPackage);
if(status != SEC_E_OK) {
- failf(data, "SSPI: couldn't get auth info");
+ failf(data, "SSPI: could not get auth info");
return CURLE_AUTH_ERROR;
}
@@ -233,7 +233,7 @@ CURLcode Curl_auth_decode_ntlm_type2_message(struct Curl_easy *data,
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* ntlm [in/out] - The NTLM data struct being used and modified.
* out [out] - The result storage.
diff --git a/libs/libcurl/src/vauth/oauth2.c b/libs/libcurl/src/vauth/oauth2.c
index 112bfe8341..00bb9867ec 100644
--- a/libs/libcurl/src/vauth/oauth2.c
+++ b/libs/libcurl/src/vauth/oauth2.c
@@ -49,8 +49,8 @@
*
* Parameters:
*
- * user[in] - The user name.
- * host[in] - The host name.
+ * user[in] - The username.
+ * host[in] - The hostname.
* port[in] - The port(when not Port 80).
* bearer[in] - The bearer token.
* out[out] - The result storage.
@@ -87,7 +87,7 @@ CURLcode Curl_auth_create_oauth_bearer_message(const char *user,
*
* Parameters:
*
- * user[in] - The user name.
+ * user[in] - The username.
* bearer[in] - The bearer token.
* out[out] - The result storage.
*
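
The two builders above wrap a bearer token into SASL initial responses. As a rough sketch of the wire format involved, the OAUTHBEARER layout of RFC 7628 is a GS2 header followed by \x01-separated key/value pairs; this is an illustration of that RFC layout in general terms, not libcurl's exact output, and all values are made up:

#include <stdio.h>

int main(void)
{
  const char *user = "alice", *host = "imap.example.com", *token = "abc123";
  unsigned int port = 993;
  char msg[512];

  /* RFC 7628 OAUTHBEARER initial client response, 0x01-delimited;
   * adjacent string literals keep the \x01 escapes unambiguous */
  int n = snprintf(msg, sizeof(msg),
                   "n,a=%s,\x01" "host=%s\x01" "port=%u\x01"
                   "auth=Bearer %s\x01\x01",
                   user, host, port, token);
  printf("%d bytes built\n", n);
  return 0;
}

The second builder above, which takes only the username and the bearer token, corresponds to the simpler XOAUTH2-style message that drops the host and port pairs.
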
diff --git a/libs/libcurl/src/vauth/spnego_gssapi.c b/libs/libcurl/src/vauth/spnego_gssapi.c
index 71d37ae232..d7a533eb98 100644
--- a/libs/libcurl/src/vauth/spnego_gssapi.c
+++ b/libs/libcurl/src/vauth/spnego_gssapi.c
@@ -65,10 +65,10 @@ bool Curl_auth_is_spnego_supported(void)
* Parameters:
*
* data [in] - The session handle.
- * userp [in] - The user name in the format User or Domain\User.
+ * userp [in] - The username in the format User or Domain\User.
* passwdp [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* chlg64 [in] - The optional base64 encoded challenge message.
* nego [in/out] - The Negotiate data struct being used and modified.
*
@@ -97,8 +97,8 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
if(nego->context && nego->status == GSS_S_COMPLETE) {
/* We finished successfully our part of authentication, but server
- * rejected it (since we're again here). Exit with an error since we
- * can't invent anything better */
+ * rejected it (since we are again here). Exit with an error since we
+ * cannot invent anything better */
Curl_auth_cleanup_spnego(nego);
return CURLE_LOGIN_DENIED;
}
diff --git a/libs/libcurl/src/vauth/spnego_sspi.c b/libs/libcurl/src/vauth/spnego_sspi.c
index 0f0d198512..a1883d0cd3 100644
--- a/libs/libcurl/src/vauth/spnego_sspi.c
+++ b/libs/libcurl/src/vauth/spnego_sspi.c
@@ -79,10 +79,10 @@ bool Curl_auth_is_spnego_supported(void)
* Parameters:
*
* data [in] - The session handle.
- * user [in] - The user name in the format User or Domain\User.
+ * user [in] - The username in the format User or Domain\User.
* password [in] - The user's password.
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* chlg64 [in] - The optional base64 encoded challenge message.
* nego [in/out] - The Negotiate data struct being used and modified.
*
@@ -113,8 +113,8 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
if(nego->context && nego->status == SEC_E_OK) {
/* We finished successfully our part of authentication, but server
- * rejected it (since we're again here). Exit with an error since we
- * can't invent anything better */
+ * rejected it (since we are again here). Exit with an error since we
+ * cannot invent anything better */
Curl_auth_cleanup_spnego(nego);
return CURLE_LOGIN_DENIED;
}
@@ -128,11 +128,11 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
if(!nego->output_token) {
/* Query the security package for Negotiate */
- nego->status = s_pSecFn->QuerySecurityPackageInfo((TCHAR *)
- TEXT(SP_NAME_NEGOTIATE),
- &SecurityPackage);
+ nego->status = (DWORD)s_pSecFn->QuerySecurityPackageInfo((TCHAR *)
+ TEXT(SP_NAME_NEGOTIATE),
+ &SecurityPackage);
if(nego->status != SEC_E_OK) {
- failf(data, "SSPI: couldn't get auth info");
+ failf(data, "SSPI: could not get auth info");
return CURLE_AUTH_ERROR;
}
@@ -168,7 +168,7 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
return CURLE_OUT_OF_MEMORY;
/* Acquire our credentials handle */
- nego->status =
+ nego->status = (DWORD)
s_pSecFn->AcquireCredentialsHandle(NULL,
(TCHAR *)TEXT(SP_NAME_NEGOTIATE),
SECPKG_CRED_OUTBOUND, NULL,
@@ -218,7 +218,7 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
SEC_CHANNEL_BINDINGS channelBindings;
SecPkgContext_Bindings pkgBindings;
pkgBindings.Bindings = &channelBindings;
- nego->status = s_pSecFn->QueryContextAttributes(
+ nego->status = (DWORD)s_pSecFn->QueryContextAttributes(
nego->sslContext,
SECPKG_ATTR_ENDPOINT_BINDINGS,
&pkgBindings
@@ -242,7 +242,7 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
resp_buf.cbBuffer = curlx_uztoul(nego->token_max);
/* Generate our challenge-response message */
- nego->status = s_pSecFn->InitializeSecurityContext(nego->credentials,
+ nego->status = (DWORD)s_pSecFn->InitializeSecurityContext(nego->credentials,
chlg ? nego->context :
NULL,
nego->spn,
@@ -259,7 +259,7 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
if(GSS_ERROR(nego->status)) {
char buffer[STRERROR_LEN];
failf(data, "InitializeSecurityContext failed: %s",
- Curl_sspi_strerror(nego->status, buffer, sizeof(buffer)));
+ Curl_sspi_strerror((int)nego->status, buffer, sizeof(buffer)));
if(nego->status == (DWORD)SEC_E_INSUFFICIENT_MEMORY)
return CURLE_OUT_OF_MEMORY;
@@ -269,11 +269,12 @@ CURLcode Curl_auth_decode_spnego_message(struct Curl_easy *data,
if(nego->status == SEC_I_COMPLETE_NEEDED ||
nego->status == SEC_I_COMPLETE_AND_CONTINUE) {
- nego->status = s_pSecFn->CompleteAuthToken(nego->context, &resp_desc);
+ nego->status = (DWORD)s_pSecFn->CompleteAuthToken(nego->context,
+ &resp_desc);
if(GSS_ERROR(nego->status)) {
char buffer[STRERROR_LEN];
failf(data, "CompleteAuthToken failed: %s",
- Curl_sspi_strerror(nego->status, buffer, sizeof(buffer)));
+ Curl_sspi_strerror((int)nego->status, buffer, sizeof(buffer)));
if(nego->status == (DWORD)SEC_E_INSUFFICIENT_MEMORY)
return CURLE_OUT_OF_MEMORY;
diff --git a/libs/libcurl/src/vauth/vauth.c b/libs/libcurl/src/vauth/vauth.c
index 0960c2f2f3..98f5fd4f67 100644
--- a/libs/libcurl/src/vauth/vauth.c
+++ b/libs/libcurl/src/vauth/vauth.c
@@ -48,7 +48,7 @@
* Parameters:
*
* service [in] - The service type such as http, smtp, pop or imap.
- * host [in] - The host name.
+ * host [in] - The hostname.
* realm [in] - The realm.
*
* Returns a pointer to the newly allocated SPN.
@@ -93,7 +93,7 @@ TCHAR *Curl_auth_build_spn(const char *service, const char *host,
return NULL;
/* Allocate and return a TCHAR based SPN. Since curlx_convert_UTF8_to_tchar
- must be freed by curlx_unicodefree we'll dupe the result so that the
+ must be freed by curlx_unicodefree we will dupe the result so that the
pointer this function returns can be normally free'd. */
tchar_spn = curlx_convert_UTF8_to_tchar(utf8_spn);
free(utf8_spn);
@@ -115,14 +115,14 @@ TCHAR *Curl_auth_build_spn(const char *service, const char *host,
* Domain/User (curl Down-level format - for compatibility with existing code)
* User@Domain (User Principal Name)
*
- * Note: The user name may be empty when using a GSS-API library or Windows
+ * Note: The username may be empty when using a GSS-API library or Windows
* SSPI as the user and domain are either obtained from the credentials cache
* when using GSS-API or via the currently logged in user's credentials when
* using Windows SSPI.
*
* Parameters:
*
- * user [in] - The user name.
+ * user [in] - The username.
*
* Returns TRUE on success; otherwise FALSE.
*/
diff --git a/libs/libcurl/src/version.c b/libs/libcurl/src/version.c
index bb63f2583f..324e55e3b3 100644
--- a/libs/libcurl/src/version.c
+++ b/libs/libcurl/src/version.c
@@ -258,10 +258,11 @@ char *curl_version(void)
api.ldapai_info_version = LDAP_API_INFO_VERSION;
if(ldap_get_option(NULL, LDAP_OPT_API_INFO, &api) == LDAP_OPT_SUCCESS) {
- unsigned int patch = api.ldapai_vendor_version % 100;
- unsigned int major = api.ldapai_vendor_version / 10000;
+ unsigned int patch = (unsigned int)(api.ldapai_vendor_version % 100);
+ unsigned int major = (unsigned int)(api.ldapai_vendor_version / 10000);
unsigned int minor =
- ((api.ldapai_vendor_version - major * 10000) - patch) / 100;
+ (((unsigned int)api.ldapai_vendor_version - major * 10000)
+ - patch) / 100;
msnprintf(ldap_buf, sizeof(ldap_buf), "%s/%u.%u.%u",
api.ldapai_vendor_name, major, minor, patch);
src[i++] = ldap_buf;
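
The added casts do not change the arithmetic: the OpenLDAP vendor version packs major*10000 + minor*100 + patch into one integer, which the code above decomposes again. A quick standalone check of that decomposition (the version number is made up for illustration):

#include <stdio.h>

int main(void)
{
  /* e.g. an OpenLDAP 2.6.31 build would report 20631 */
  int v = 20631;
  unsigned int patch = (unsigned int)(v % 100);
  unsigned int major = (unsigned int)(v / 10000);
  unsigned int minor = (((unsigned int)v - major * 10000) - patch) / 100;

  printf("%u.%u.%u\n", major, minor, patch);   /* prints 2.6.31 */
  return 0;
}
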
@@ -394,7 +395,7 @@ static const char * const supported_protocols[] = {
};
/*
- * Feature presence run-time check functions.
+ * Feature presence runtime check functions.
*
* Warning: the value returned by these should not change between
* curl_global_init() and curl_global_cleanup() calls.
@@ -540,7 +541,7 @@ static curl_version_info_data version_info = {
LIBCURL_VERSION,
LIBCURL_VERSION_NUM,
OS, /* as found by configure or set by hand at build-time */
- 0, /* features bitmask is built at run-time */
+ 0, /* features bitmask is built at runtime */
NULL, /* ssl_version */
0, /* ssl_version_num, this is kept at zero */
NULL, /* zlib_version */
@@ -596,7 +597,7 @@ curl_version_info_data *curl_version_info(CURLversion stamp)
static char zstd_buffer[80];
#endif
- (void)stamp; /* avoid compiler warnings, we don't use this */
+ (void)stamp; /* avoid compiler warnings, we do not use this */
#ifdef USE_SSL
Curl_ssl_version(ssl_buffer, sizeof(ssl_buffer));
@@ -640,7 +641,7 @@ curl_version_info_data *curl_version_info(CURLversion stamp)
#ifdef USE_NGHTTP2
{
nghttp2_info *h2 = nghttp2_version(0);
- version_info.nghttp2_ver_num = h2->version_num;
+ version_info.nghttp2_ver_num = (unsigned int)h2->version_num;
version_info.nghttp2_version = h2->version_str;
}
#endif
diff --git a/libs/libcurl/src/version_win32.c b/libs/libcurl/src/version_win32.c
index 8759dcc325..4898ff1560 100644
--- a/libs/libcurl/src/version_win32.c
+++ b/libs/libcurl/src/version_win32.c
@@ -30,8 +30,10 @@
#include "version_win32.h"
#include "warnless.h"
-/* The last #include files should be: */
+/* The last 2 #include files should be in this order */
+#ifdef BUILDING_LIBCURL
#include "curl_memory.h"
+#endif
#include "memdebug.h"
/* This Unicode version struct works for VerifyVersionInfoW (OSVERSIONINFOEXW)
@@ -63,7 +65,7 @@ struct OUR_OSVERSIONINFOEXW {
* ignored.
* platform [in] - The optional platform identifier.
* condition [in] - The test condition used to specify whether we are
- * checking a version less then, equal to or greater than
+ * checking a version less than, equal to or greater than
* what is specified in the major and minor version
* numbers.
*
@@ -78,13 +80,13 @@ bool curlx_verify_windows_version(const unsigned int majorVersion,
bool matched = FALSE;
#if defined(CURL_WINDOWS_APP)
- (void)buildVersion;
-
/* We have no way to determine the Windows version from Windows apps,
- so let's assume we're running on the target Windows version. */
+ so let's assume we are running on the target Windows version. */
const WORD fullVersion = MAKEWORD(minorVersion, majorVersion);
const WORD targetVersion = (WORD)_WIN32_WINNT;
+ (void)buildVersion;
+
switch(condition) {
case VERSION_LESS_THAN:
matched = targetVersion < fullVersion;
@@ -108,7 +110,7 @@ bool curlx_verify_windows_version(const unsigned int majorVersion,
}
if(matched && (platform == PLATFORM_WINDOWS)) {
- /* we're always running on PLATFORM_WINNT */
+ /* we are always running on PLATFORM_WINNT */
matched = FALSE;
}
#elif !defined(_WIN32_WINNT) || !defined(_WIN32_WINNT_WIN2K) || \
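
In the CURL_WINDOWS_APP branch above, both sides are packed with MAKEWORD(minor, major), so an ordinary integer compare orders versions correctly: major ends up in the high byte, minor in the low byte. A standalone illustration of the VERSION_LESS_THAN-style check, where PACK_VERSION is an illustrative stand-in for the windows.h macro and the target value is an assumption:

#include <stdio.h>

/* mirrors the MAKEWORD(minor, major) packing used above: major in the
 * high byte, minor in the low byte (stand-in, not windows.h) */
#define PACK_VERSION(minor, major) \
  ((unsigned short)(((unsigned char)(minor)) | \
                    ((unsigned short)((unsigned char)(major))) << 8))

int main(void)
{
  unsigned short target = 0x0601;                  /* assume a Windows 7 build target */
  unsigned short requested = PACK_VERSION(0, 10);  /* asking about 10.0 */

  /* VERSION_LESS_THAN-style comparison from the hunk above */
  printf("target < requested: %s\n",
         (target < requested) ? "yes" : "no");     /* prints "yes" */
  return 0;
}
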
diff --git a/libs/libcurl/src/vquic/curl_msh3.c b/libs/libcurl/src/vquic/curl_msh3.c
index 5827d99c24..a780acc31f 100644
--- a/libs/libcurl/src/vquic/curl_msh3.c
+++ b/libs/libcurl/src/vquic/curl_msh3.c
@@ -293,7 +293,7 @@ static const MSH3_REQUEST_IF msh3_request_if = {
msh3_data_sent
};
-/* Decode HTTP status code. Returns -1 if no valid status code was
+/* Decode HTTP status code. Returns -1 if no valid status code was
decoded. (duplicate from http2.c) */
static int decode_status_code(const char *value, size_t len)
{
@@ -689,7 +689,7 @@ static ssize_t cf_msh3_send(struct Curl_cfilter *cf, struct Curl_easy *data,
}
/* TODO - msh3/msquic will hold onto this memory until the send complete
- event. How do we make sure curl doesn't free it until then? */
+ event. How do we make sure curl does not free it until then? */
*err = CURLE_OK;
nwritten = len;
}
@@ -840,7 +840,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
ctx->api = MsH3ApiOpen();
if(!ctx->api) {
- failf(data, "can't create msh3 api");
+ failf(data, "cannot create msh3 api");
return CURLE_FAILED_INIT;
}
@@ -851,7 +851,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
&addr,
!verify);
if(!ctx->qconn) {
- failf(data, "can't create msh3 connection");
+ failf(data, "cannot create msh3 connection");
if(ctx->api) {
MsH3ApiClose(ctx->api);
ctx->api = NULL;
@@ -883,7 +883,7 @@ static CURLcode cf_msh3_connect(struct Curl_cfilter *cf,
CF_DATA_SAVE(save, cf, data);
if(ctx->sock[SP_LOCAL] == CURL_SOCKET_BAD) {
- if(Curl_socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx->sock[0]) < 0) {
+ if(Curl_socketpair(AF_UNIX, SOCK_STREAM, 0, &ctx->sock[0], FALSE) < 0) {
ctx->sock[SP_LOCAL] = CURL_SOCKET_BAD;
ctx->sock[SP_REMOTE] = CURL_SOCKET_BAD;
return CURLE_COULDNT_CONNECT;
@@ -1038,6 +1038,7 @@ struct Curl_cftype Curl_cft_http3 = {
cf_msh3_destroy,
cf_msh3_connect,
cf_msh3_close,
+ Curl_cf_def_shutdown,
Curl_cf_def_get_host,
cf_msh3_adjust_pollset,
cf_msh3_data_pending,
diff --git a/libs/libcurl/src/vquic/curl_ngtcp2.c b/libs/libcurl/src/vquic/curl_ngtcp2.c
index 9bc4b3a5a5..790e3c6ff4 100644
--- a/libs/libcurl/src/vquic/curl_ngtcp2.c
+++ b/libs/libcurl/src/vquic/curl_ngtcp2.c
@@ -88,7 +88,7 @@
/* The pool keeps spares around and half of a full stream windows
* seems good. More does not seem to improve performance.
* The benefit of the pool is that stream buffer to not keep
- * spares. So memory consumption goes down when streams run empty,
+ * spares. Memory consumption goes down when streams run empty,
* have a large upload done, etc. */
#define H3_STREAM_POOL_SPARES \
(H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE ) / 2
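
The comment above reasons that half a stream window's worth of spare chunks is a good pool size. Assuming the 128 KiB window and 16 KiB chunk sizes these HTTP/3 filters use (the quiche hunk further down spells those values out), the macro works out to four spare chunks; a quick check:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
  /* assumed sizes: 128 KiB stream window split into 16 KiB chunks */
  const size_t window = 128 * 1024;
  const size_t chunk  = 16 * 1024;
  const size_t spares = (window / chunk) / 2;

  printf("pool spares: %zu chunks\n", spares);   /* prints 4 */
  return 0;
}
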
@@ -138,7 +138,7 @@ struct cf_ngtcp2_ctx {
uint64_t used_bidi_streams; /* bidi streams we have opened */
uint64_t max_bidi_streams; /* max bidi streams we can open */
int qlogfd;
- BIT(conn_closed); /* connection is closed */
+ BIT(shutdown_started); /* queued shutdown packets */
};
/* How to access `call_data` from a cf_ngtcp2 filter */
@@ -162,7 +162,6 @@ struct h3_stream_ctx {
struct bufq sendbuf; /* h3 request body */
struct h1_req_parser h1; /* h1 request parsing */
size_t sendbuf_len_in_flight; /* sendbuf amount "in flight" */
- size_t upload_blocked_len; /* the amount written last and EGAINed */
curl_uint64_t error3; /* HTTP/3 stream error code */
curl_off_t upload_left; /* number of request bytes left to upload */
int status_code; /* HTTP status code */
@@ -198,7 +197,7 @@ static CURLcode h3_data_setup(struct Curl_cfilter *cf,
struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct h3_stream_ctx *stream = H3_STREAM_CTX(ctx, data);
- if(!data || !data->req.p.http) {
+ if(!data) {
failf(data, "initialization failure, transfer not http initialized");
return CURLE_FAILED_INIT;
}
@@ -326,8 +325,8 @@ static void pktx_update_time(struct pkt_io_ctx *pktx,
struct cf_ngtcp2_ctx *ctx = cf->ctx;
vquic_ctx_update_time(&ctx->q);
- pktx->ts = ctx->q.last_op.tv_sec * NGTCP2_SECONDS +
- ctx->q.last_op.tv_usec * NGTCP2_MICROSECONDS;
+ pktx->ts = (ngtcp2_tstamp)ctx->q.last_op.tv_sec * NGTCP2_SECONDS +
+ (ngtcp2_tstamp)ctx->q.last_op.tv_usec * NGTCP2_MICROSECONDS;
}
static void pktx_init(struct pkt_io_ctx *pktx,
@@ -417,7 +416,7 @@ static void quic_settings(struct cf_ngtcp2_ctx *ctx,
}
}
-static int init_ngh3_conn(struct Curl_cfilter *cf);
+static CURLcode init_ngh3_conn(struct Curl_cfilter *cf);
static int cb_handshake_completed(ngtcp2_conn *tconn, void *user_data)
{
@@ -506,8 +505,8 @@ static int cb_recv_stream_data(ngtcp2_conn *tconn, uint32_t flags,
/* number of bytes inside buflen which consists of framing overhead
* including QPACK HEADERS. In other words, it does not consume payload of
* DATA frame. */
- ngtcp2_conn_extend_max_stream_offset(tconn, stream_id, nconsumed);
- ngtcp2_conn_extend_max_offset(tconn, nconsumed);
+ ngtcp2_conn_extend_max_stream_offset(tconn, stream_id, (uint64_t)nconsumed);
+ ngtcp2_conn_extend_max_offset(tconn, (uint64_t)nconsumed);
return 0;
}
@@ -663,7 +662,7 @@ static void cb_rand(uint8_t *dest, size_t destlen,
result = Curl_rand(NULL, dest, destlen);
if(result) {
- /* cb_rand is only used for non-cryptographic context. If Curl_rand
+ /* cb_rand is only used for non-cryptographic context. If Curl_rand
failed, just fill 0 and call it *random*. */
memset(dest, 0, destlen);
}
@@ -798,7 +797,8 @@ static CURLcode check_and_set_expiry(struct Curl_cfilter *cf,
if(timeout % NGTCP2_MILLISECONDS) {
timeout += NGTCP2_MILLISECONDS;
}
- Curl_expire(data, timeout / NGTCP2_MILLISECONDS, EXPIRE_QUIC);
+ Curl_expire(data, (timediff_t)(timeout / NGTCP2_MILLISECONDS),
+ EXPIRE_QUIC);
}
}
return CURLE_OK;
@@ -815,6 +815,9 @@ static void cf_ngtcp2_adjust_pollset(struct Curl_cfilter *cf,
return;
Curl_pollset_check(data, ps, ctx->q.sockfd, &want_recv, &want_send);
+ if(!want_send && !Curl_bufq_is_empty(&ctx->q.sendbuf))
+ want_send = TRUE;
+
if(want_recv || want_send) {
struct h3_stream_ctx *stream = H3_STREAM_CTX(ctx, data);
struct cf_call_data save;
@@ -954,7 +957,7 @@ static int cb_h3_end_headers(nghttp3_conn *conn, int64_t sid,
if(!stream)
return 0;
- /* add a CRLF only if we've received some headers */
+ /* add a CRLF only if we have received some headers */
h3_xfer_write_resp_hd(cf, data, stream, STRCONST("\r\n"), stream->closed);
CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] end_headers, status=%d",
@@ -1091,7 +1094,7 @@ static nghttp3_callbacks ngh3_callbacks = {
NULL /* recv_settings */
};
-static int init_ngh3_conn(struct Curl_cfilter *cf)
+static CURLcode init_ngh3_conn(struct Curl_cfilter *cf)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
CURLcode result;
@@ -1202,7 +1205,7 @@ static ssize_t cf_ngtcp2_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
pktx_init(&pktx, cf, data);
- if(!stream || ctx->conn_closed) {
+ if(!stream || ctx->shutdown_started) {
*err = CURLE_RECV_ERROR;
goto out;
}
@@ -1268,8 +1271,8 @@ static int cb_h3_acked_req_body(nghttp3_conn *conn, int64_t stream_id,
Curl_bufq_skip(&stream->sendbuf, skiplen);
stream->sendbuf_len_in_flight -= skiplen;
- /* Everything ACKed, we resume upload processing */
- if(!stream->sendbuf_len_in_flight) {
+ /* Resume upload processing if we have more data to send */
+ if(stream->sendbuf_len_in_flight < Curl_bufq_len(&stream->sendbuf)) {
int rv = nghttp3_conn_resume_stream(conn, stream_id);
if(rv && rv != NGHTTP3_ERR_STREAM_NOT_FOUND) {
return NGTCP2_ERR_CALLBACK_FAILURE;
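
The acknowledgement callback now resumes the stream only while sendbuf still holds bytes that have not been submitted yet: sendbuf_len_in_flight counts the submitted-but-unacked portion, so "in flight < total buffered" means unsent data remains. A small standalone illustration of that bookkeeping (all numbers invented):

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

int main(void)
{
  /* invented buffer state: 10 KiB queued, 6 KiB already handed to the stack */
  size_t buffered = 10 * 1024;   /* total bytes still held in sendbuf */
  size_t in_flight = 6 * 1024;   /* submitted to the QUIC stack, not acked */
  size_t acked = 2 * 1024;       /* what the peer just acknowledged */

  buffered -= acked;             /* Curl_bufq_skip() drops the acked bytes */
  in_flight -= acked;

  /* resume only if unsubmitted data remains, as in the change above */
  bool resume = in_flight < buffered;
  printf("buffered=%zu in_flight=%zu resume=%s\n",
         buffered, in_flight, resume ? "yes" : "no");  /* resume=yes */
  return 0;
}
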
@@ -1504,7 +1507,7 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
}
if(!stream || stream->id < 0) {
- if(ctx->conn_closed) {
+ if(ctx->shutdown_started) {
CURL_TRC_CF(data, cf, "cannot open stream on closed connection");
*err = CURLE_SEND_ERROR;
sent = -1;
@@ -1524,21 +1527,6 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
sent = -1;
goto out;
}
- else if(stream->upload_blocked_len) {
- /* the data in `buf` has already been submitted or added to the
- * buffers, but have been EAGAINed on the last invocation. */
- DEBUGASSERT(len >= stream->upload_blocked_len);
- if(len < stream->upload_blocked_len) {
- /* Did we get called again with a smaller `len`? This should not
- * happen. We are not prepared to handle that. */
- failf(data, "HTTP/3 send again with decreased length");
- *err = CURLE_HTTP3;
- sent = -1;
- goto out;
- }
- sent = (ssize_t)stream->upload_blocked_len;
- stream->upload_blocked_len = 0;
- }
else if(stream->closed) {
if(stream->resp_hds_complete) {
/* Server decided to close the stream after having sent us a final
@@ -1558,7 +1546,7 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
sent = -1;
goto out;
}
- else if(ctx->conn_closed) {
+ else if(ctx->shutdown_started) {
CURL_TRC_CF(data, cf, "cannot send on closed connection");
*err = CURLE_SEND_ERROR;
sent = -1;
@@ -1582,18 +1570,6 @@ static ssize_t cf_ngtcp2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
sent = -1;
}
- if(stream && sent > 0 && stream->sendbuf_len_in_flight) {
- /* We have unacknowledged DATA and cannot report success to our
- * caller. Instead we EAGAIN and remember how much we have already
- * "written" into our various internal connection buffers. */
- stream->upload_blocked_len = sent;
- CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] cf_send(len=%zu), "
- "%zu bytes in flight -> EGAIN", stream->id, len,
- stream->sendbuf_len_in_flight);
- *err = CURLE_AGAIN;
- sent = -1;
- }
-
out:
result = check_and_set_expiry(cf, data, &pktx);
if(result) {
@@ -1631,7 +1607,7 @@ static CURLcode recv_pkt(const unsigned char *pkt, size_t pktlen,
++pktx->pkt_count;
ngtcp2_addr_init(&path.local, (struct sockaddr *)&ctx->q.local_addr,
- ctx->q.local_addrlen);
+ (socklen_t)ctx->q.local_addrlen);
ngtcp2_addr_init(&path.remote, (struct sockaddr *)remote_addr,
remote_addrlen);
pi.ecn = (uint8_t)ecn;
@@ -1867,7 +1843,7 @@ static CURLcode cf_progress_egress(struct Curl_cfilter *cf,
DEBUGASSERT(nread > 0);
if(pktcnt == 0) {
/* first packet in buffer. This is either of a known, "good"
- * payload size or it is a PMTUD. We'll see. */
+ * payload size or it is a PMTUD. We will see. */
gsolen = (size_t)nread;
}
else if((size_t)nread > gsolen ||
@@ -1961,7 +1937,8 @@ static CURLcode cf_ngtcp2_data_event(struct Curl_cfilter *cf,
struct h3_stream_ctx *stream = H3_STREAM_CTX(ctx, data);
if(stream && !stream->send_closed) {
stream->send_closed = TRUE;
- stream->upload_left = Curl_bufq_len(&stream->sendbuf);
+ stream->upload_left = Curl_bufq_len(&stream->sendbuf) -
+ stream->sendbuf_len_in_flight;
(void)nghttp3_conn_resume_stream(ctx->h3conn, stream->id);
}
break;
@@ -2007,29 +1984,97 @@ static void cf_ngtcp2_ctx_clear(struct cf_ngtcp2_ctx *ctx)
ctx->call_data = save;
}
-static void cf_ngtcp2_conn_close(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static CURLcode cf_ngtcp2_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
- if(ctx && ctx->qconn && !ctx->conn_closed) {
+ struct cf_call_data save;
+ struct pkt_io_ctx pktx;
+ CURLcode result = CURLE_OK;
+
+ if(cf->shutdown || !ctx->qconn) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ CF_DATA_SAVE(save, cf, data);
+ *done = FALSE;
+ pktx_init(&pktx, cf, data);
+
+ if(!ctx->shutdown_started) {
char buffer[NGTCP2_MAX_UDP_PAYLOAD_SIZE];
- struct pkt_io_ctx pktx;
- ngtcp2_ssize rc;
-
- ctx->conn_closed = TRUE;
- pktx_init(&pktx, cf, data);
- rc = ngtcp2_conn_write_connection_close(ctx->qconn, NULL, /* path */
- NULL, /* pkt_info */
- (uint8_t *)buffer, sizeof(buffer),
- &ctx->last_error, pktx.ts);
- CURL_TRC_CF(data, cf, "closing connection(err_type=%d, err_code=%"
+ ngtcp2_ssize nwritten;
+
+ if(!Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ CURL_TRC_CF(data, cf, "shutdown, flushing sendbuf");
+ result = cf_progress_egress(cf, data, &pktx);
+ if(!Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ CURL_TRC_CF(data, cf, "sending shutdown packets blocked");
+ result = CURLE_OK;
+ goto out;
+ }
+ else if(result) {
+ CURL_TRC_CF(data, cf, "shutdown, error %d flushing sendbuf", result);
+ *done = TRUE;
+ goto out;
+ }
+ }
+
+ ctx->shutdown_started = TRUE;
+ nwritten = ngtcp2_conn_write_connection_close(
+ ctx->qconn, NULL, /* path */
+ NULL, /* pkt_info */
+ (uint8_t *)buffer, sizeof(buffer),
+ &ctx->last_error, pktx.ts);
+ CURL_TRC_CF(data, cf, "start shutdown(err_type=%d, err_code=%"
CURL_PRIu64 ") -> %d", ctx->last_error.type,
- (curl_uint64_t)ctx->last_error.error_code, (int)rc);
- if(rc > 0) {
- while((send(ctx->q.sockfd, buffer, (SEND_TYPE_ARG3)rc, 0) == -1) &&
- SOCKERRNO == EINTR);
+ (curl_uint64_t)ctx->last_error.error_code, (int)nwritten);
+ if(nwritten > 0) {
+ Curl_bufq_write(&ctx->q.sendbuf, (const unsigned char *)buffer,
+ (size_t)nwritten, &result);
+ if(result) {
+ CURL_TRC_CF(data, cf, "error %d adding shutdown packets to sendbuf, "
+ "aborting shutdown", result);
+ goto out;
+ }
+ ctx->q.no_gso = TRUE;
+ ctx->q.gsolen = (size_t)nwritten;
+ ctx->q.split_len = 0;
}
}
+
+ if(!Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ CURL_TRC_CF(data, cf, "shutdown, flushing egress");
+ result = vquic_flush(cf, data, &ctx->q);
+ if(result == CURLE_AGAIN) {
+ CURL_TRC_CF(data, cf, "sending shutdown packets blocked");
+ result = CURLE_OK;
+ goto out;
+ }
+ else if(result) {
+ CURL_TRC_CF(data, cf, "shutdown, error %d flushing sendbuf", result);
+ *done = TRUE;
+ goto out;
+ }
+ }
+
+ if(Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ /* Sent everything off. ngtcp2 seems to have no support for graceful
+ * shutdowns. So, we are done. */
+ CURL_TRC_CF(data, cf, "shutdown completely sent off, done");
+ *done = TRUE;
+ result = CURLE_OK;
+ }
+out:
+ CF_DATA_RESTORE(cf, save);
+ return result;
+}
+
+static void cf_ngtcp2_conn_close(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
+{
+ bool done;
+ cf_ngtcp2_shutdown(cf, data, &done);
}
static void cf_ngtcp2_close(struct Curl_cfilter *cf, struct Curl_easy *data)
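
cf_ngtcp2_shutdown, like the osslq and quiche variants further down, follows a non-blocking contract: each call tries to queue and flush the connection-close packets, returns CURLE_OK when it is merely blocked on the socket, and sets *done once everything has been sent or the attempt has failed. A hypothetical driver loop for such a filter method, with every name invented for illustration (the real caller sits elsewhere in curl's connection handling):

#include <stdbool.h>
#include <stdio.h>

typedef int CURLcode;      /* stand-in for the libcurl type */
#define CURLE_OK 0

/* hypothetical filter shutdown: pretend flushing needs two passes */
static CURLcode demo_shutdown(int *calls, bool *done)
{
  *done = (++(*calls) >= 2);   /* first call "blocks", second finishes */
  return CURLE_OK;
}

int main(void)
{
  bool done = false;
  int calls = 0;
  CURLcode result = CURLE_OK;

  /* drive the shutdown until it reports completion or a hard error;
   * a real caller would wait for socket readiness between attempts
   * instead of looping immediately */
  while(!done && !result)
    result = demo_shutdown(&calls, &done);

  printf("shutdown finished after %d call(s), result %d\n", calls, result);
  return 0;
}
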
@@ -2105,7 +2150,7 @@ static CURLcode tls_ctx_setup(struct Curl_cfilter *cf,
return CURLE_FAILED_INIT;
}
#endif /* !OPENSSL_IS_BORINGSSL && !OPENSSL_IS_AWSLC */
- /* Enable the session cache because it's a prerequisite for the
+ /* Enable the session cache because it is a prerequisite for the
* "new session" callback. Use the "external storage" mode to prevent
* OpenSSL from creating an internal session cache.
*/
@@ -2120,7 +2165,7 @@ static CURLcode tls_ctx_setup(struct Curl_cfilter *cf,
return CURLE_FAILED_INIT;
}
#elif defined(USE_WOLFSSL)
- if(ngtcp2_crypto_wolfssl_configure_client_context(ctx->ssl_ctx) != 0) {
+ if(ngtcp2_crypto_wolfssl_configure_client_context(ctx->wssl.ctx) != 0) {
failf(data, "ngtcp2_crypto_wolfssl_configure_client_context failed");
return CURLE_FAILED_INIT;
}
@@ -2196,7 +2241,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
(struct sockaddr *)&ctx->q.local_addr,
ctx->q.local_addrlen);
ngtcp2_addr_init(&ctx->connected_path.remote,
- &sockaddr->sa_addr, sockaddr->addrlen);
+ &sockaddr->sa_addr, (socklen_t)sockaddr->addrlen);
rc = ngtcp2_conn_client_new(&ctx->qconn, &ctx->dcid, &ctx->scid,
&ctx->connected_path,
@@ -2211,7 +2256,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
#elif defined(USE_GNUTLS)
ngtcp2_conn_set_tls_native_handle(ctx->qconn, ctx->tls.gtls.session);
#else
- ngtcp2_conn_set_tls_native_handle(ctx->qconn, ctx->tls.ssl);
+ ngtcp2_conn_set_tls_native_handle(ctx->qconn, ctx->tls.wssl.handle);
#endif
ngtcp2_ccerr_default(&ctx->last_error);
@@ -2331,7 +2376,7 @@ static CURLcode cf_ngtcp2_query(struct Curl_cfilter *cf,
* by callback. QUIC counts the number over the lifetime of the
* connection, ever increasing.
* We count the *open* transfers plus the budget for new ones. */
- if(!ctx->qconn || ctx->conn_closed) {
+ if(!ctx->qconn || ctx->shutdown_started) {
*pres1 = 0;
}
else if(ctx->max_bidi_streams) {
@@ -2343,7 +2388,7 @@ static CURLcode cf_ngtcp2_query(struct Curl_cfilter *cf,
*pres1 = (max_streams > INT_MAX)? INT_MAX : (int)max_streams;
}
else /* transport params not arrived yet? take our default. */
- *pres1 = Curl_multi_max_concurrent_streams(data->multi);
+ *pres1 = (int)Curl_multi_max_concurrent_streams(data->multi);
CURL_TRC_CF(data, cf, "query conn[%" CURL_FORMAT_CURL_OFF_T "]: "
"MAX_CONCURRENT -> %d (%zu in use)",
cf->conn->connection_id, *pres1, CONN_INUSE(cf->conn));
@@ -2389,7 +2434,7 @@ static bool cf_ngtcp2_conn_is_alive(struct Curl_cfilter *cf,
CF_DATA_SAVE(save, cf, data);
*input_pending = FALSE;
- if(!ctx->qconn || ctx->conn_closed)
+ if(!ctx->qconn || ctx->shutdown_started)
goto out;
/* Both sides of the QUIC connection announce their max idle times in
@@ -2416,8 +2461,8 @@ static bool cf_ngtcp2_conn_is_alive(struct Curl_cfilter *cf,
alive = TRUE;
if(*input_pending) {
CURLcode result;
- /* This happens before we've sent off a request and the connection is
- not in use by any other transfer, there shouldn't be any data here,
+ /* This happens before we have sent off a request and the connection is
+ not in use by any other transfer, there should not be any data here,
only "protocol frames" */
*input_pending = FALSE;
result = cf_progress_ingress(cf, data, NULL);
@@ -2437,6 +2482,7 @@ struct Curl_cftype Curl_cft_http3 = {
cf_ngtcp2_destroy,
cf_ngtcp2_connect,
cf_ngtcp2_close,
+ cf_ngtcp2_shutdown,
Curl_cf_def_get_host,
cf_ngtcp2_adjust_pollset,
cf_ngtcp2_data_pending,
diff --git a/libs/libcurl/src/vquic/curl_osslq.c b/libs/libcurl/src/vquic/curl_osslq.c
index 3ffdfecf82..59c1f5053e 100644
--- a/libs/libcurl/src/vquic/curl_osslq.c
+++ b/libs/libcurl/src/vquic/curl_osslq.c
@@ -71,7 +71,7 @@
/* The pool keeps spares around and half of a full stream window
* seems good. More does not seem to improve performance.
* The benefit of the pool is that stream buffer to not keep
- * spares. So memory consumption goes down when streams run empty,
+ * spares. Memory consumption goes down when streams run empty,
* have a large upload done, etc. */
#define H3_STREAM_POOL_SPARES \
(H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE ) / 2
@@ -232,7 +232,7 @@ static CURLcode cf_osslq_stream_open(struct cf_osslq_stream *s,
if(!s->ssl) {
return CURLE_FAILED_INIT;
}
- s->id = SSL_get_stream_id(s->ssl);
+ s->id = (curl_int64_t)SSL_get_stream_id(s->ssl);
SSL_set_app_data(s->ssl, user_data);
return CURLE_OK;
}
@@ -294,10 +294,10 @@ struct cf_osslq_ctx {
size_t max_stream_window; /* max flow window for one stream */
uint64_t max_idle_ms; /* max idle time for QUIC connection */
BIT(got_first_byte); /* if first byte was received */
-#ifdef USE_OPENSSL
BIT(x509_store_setup); /* if x509 store has been set up */
BIT(protocol_shutdown); /* QUIC connection is shut down */
-#endif
+ BIT(need_recv); /* QUIC connection needs to receive */
+ BIT(need_send); /* QUIC connection needs to send */
};
static void cf_osslq_ctx_clear(struct cf_osslq_ctx *ctx)
@@ -316,6 +316,77 @@ static void cf_osslq_ctx_clear(struct cf_osslq_ctx *ctx)
ctx->call_data = save;
}
+static CURLcode cf_osslq_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_osslq_ctx *ctx = cf->ctx;
+ struct cf_call_data save;
+ CURLcode result = CURLE_OK;
+ int rc;
+
+ CF_DATA_SAVE(save, cf, data);
+
+ if(cf->shutdown || ctx->protocol_shutdown) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ CF_DATA_SAVE(save, cf, data);
+ *done = FALSE;
+ ctx->need_send = FALSE;
+ ctx->need_recv = FALSE;
+
+ rc = SSL_shutdown_ex(ctx->tls.ossl.ssl,
+ SSL_SHUTDOWN_FLAG_NO_BLOCK, NULL, 0);
+ if(rc == 0) { /* ongoing */
+ CURL_TRC_CF(data, cf, "shutdown ongoing");
+ ctx->need_recv = TRUE;
+ goto out;
+ }
+ else if(rc == 1) { /* done */
+ CURL_TRC_CF(data, cf, "shutdown finished");
+ *done = TRUE;
+ goto out;
+ }
+ else {
+ long sslerr;
+ char err_buffer[256];
+ int err = SSL_get_error(ctx->tls.ossl.ssl, rc);
+
+ switch(err) {
+ case SSL_ERROR_NONE:
+ case SSL_ERROR_ZERO_RETURN:
+ CURL_TRC_CF(data, cf, "shutdown not received, but closed");
+ *done = TRUE;
+ goto out;
+ case SSL_ERROR_WANT_READ:
+      /* SSL has sent its notify and now wants to read the reply
+ * from the server. We are not really interested in that. */
+ CURL_TRC_CF(data, cf, "shutdown sent, want receive");
+ ctx->need_recv = TRUE;
+ goto out;
+ case SSL_ERROR_WANT_WRITE:
+ CURL_TRC_CF(data, cf, "shutdown send blocked");
+ ctx->need_send = TRUE;
+ goto out;
+ default:
+ /* We give up on this. */
+ sslerr = ERR_get_error();
+ CURL_TRC_CF(data, cf, "shutdown, ignore recv error: '%s', errno %d",
+ (sslerr ?
+ osslq_strerror(sslerr, err_buffer, sizeof(err_buffer)) :
+ osslq_SSL_ERROR_to_str(err)),
+ SOCKERRNO);
+ *done = TRUE;
+ result = CURLE_OK;
+ goto out;
+ }
+ }
+out:
+ CF_DATA_RESTORE(cf, save);
+ return result;
+}
+
static void cf_osslq_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_osslq_ctx *ctx = cf->ctx;
@@ -323,8 +394,13 @@ static void cf_osslq_close(struct Curl_cfilter *cf, struct Curl_easy *data)
CF_DATA_SAVE(save, cf, data);
if(ctx && ctx->tls.ossl.ssl) {
- /* TODO: send connection close */
CURL_TRC_CF(data, cf, "cf_osslq_close()");
+ if(!cf->shutdown && !ctx->protocol_shutdown) {
+ /* last best effort, which OpenSSL calls a "rapid" shutdown. */
+ SSL_shutdown_ex(ctx->tls.ossl.ssl,
+ (SSL_SHUTDOWN_FLAG_NO_BLOCK | SSL_SHUTDOWN_FLAG_RAPID),
+ NULL, 0);
+ }
cf_osslq_ctx_clear(ctx);
}
@@ -355,12 +431,12 @@ static CURLcode cf_osslq_h3conn_add_stream(struct cf_osslq_h3conn *h3,
struct Curl_easy *data)
{
struct cf_osslq_ctx *ctx = cf->ctx;
- int64_t stream_id = SSL_get_stream_id(stream_ssl);
+ curl_int64_t stream_id = (curl_int64_t)SSL_get_stream_id(stream_ssl);
if(h3->remote_ctrl_n >= ARRAYSIZE(h3->remote_ctrl)) {
/* rejected, we are full */
CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] rejecting remote stream",
- (curl_int64_t)stream_id);
+ stream_id);
SSL_free(stream_ssl);
return CURLE_FAILED_INIT;
}
@@ -371,12 +447,12 @@ static CURLcode cf_osslq_h3conn_add_stream(struct cf_osslq_h3conn *h3,
nstream->ssl = stream_ssl;
Curl_bufq_initp(&nstream->recvbuf, &ctx->stream_bufcp, 1, BUFQ_OPT_NONE);
CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] accepted remote uni stream",
- (curl_int64_t)stream_id);
+ stream_id);
break;
}
default:
CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] reject remote non-uni-read"
- " stream", (curl_int64_t)stream_id);
+ " stream", stream_id);
SSL_free(stream_ssl);
return CURLE_FAILED_INIT;
}
@@ -440,7 +516,7 @@ static CURLcode cf_osslq_ssl_err(struct Curl_cfilter *cf,
/* detail is already set to the SSL error above */
- /* If we e.g. use SSLv2 request-method and the server doesn't like us
+ /* If we e.g. use SSLv2 request-method and the server does not like us
* (RST connection, etc.), OpenSSL gives no explanation whatsoever and
* the SO_ERROR is also lost.
*/
@@ -484,7 +560,6 @@ struct h3_stream_ctx {
struct bufq recvbuf; /* h3 response body */
struct h1_req_parser h1; /* h1 request parsing */
size_t sendbuf_len_in_flight; /* sendbuf amount "in flight" */
- size_t upload_blocked_len; /* the amount written last and EGAINed */
size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
curl_uint64_t error3; /* HTTP/3 stream error code */
curl_off_t upload_left; /* number of request bytes left to upload */
@@ -521,7 +596,7 @@ static CURLcode h3_data_setup(struct Curl_cfilter *cf,
struct cf_osslq_ctx *ctx = cf->ctx;
struct h3_stream_ctx *stream = H3_STREAM_CTX(ctx, data);
- if(!data || !data->req.p.http) {
+ if(!data) {
failf(data, "initialization failure, transfer not http initialized");
return CURLE_FAILED_INIT;
}
@@ -829,7 +904,7 @@ static int cb_h3_end_headers(nghttp3_conn *conn, int64_t sid,
if(!stream)
return 0;
- /* add a CRLF only if we've received some headers */
+ /* add a CRLF only if we have received some headers */
result = write_resp_raw(cf, data, "\r\n", 2, FALSE);
if(result) {
return -1;
@@ -977,8 +1052,8 @@ static int cb_h3_acked_stream_data(nghttp3_conn *conn, int64_t stream_id,
Curl_bufq_skip(&stream->sendbuf, skiplen);
stream->sendbuf_len_in_flight -= skiplen;
- /* Everything ACKed, we resume upload processing */
- if(!stream->sendbuf_len_in_flight) {
+ /* Resume upload processing if we have more data to send */
+ if(stream->sendbuf_len_in_flight < Curl_bufq_len(&stream->sendbuf)) {
int rv = nghttp3_conn_resume_stream(conn, stream_id);
if(rv && rv != NGHTTP3_ERR_STREAM_NOT_FOUND) {
return NGHTTP3_ERR_CALLBACK_FAILURE;
@@ -1442,19 +1517,12 @@ static CURLcode h3_send_streams(struct Curl_cfilter *cf,
for(i = 0; (i < n) && !blocked; ++i) {
/* Without stream->s.ssl, we closed that already, so
* pretend the write did succeed. */
-#ifdef SSL_WRITE_FLAG_CONCLUDE
- /* Since OpenSSL v3.3.x, on last chunk set EOS if needed */
uint64_t flags = (eos && ((i + 1) == n))? SSL_WRITE_FLAG_CONCLUDE : 0;
written = vec[i].len;
ok = !s->ssl || SSL_write_ex2(s->ssl, vec[i].base, vec[i].len, flags,
&written);
if(ok && flags & SSL_WRITE_FLAG_CONCLUDE)
eos_written = TRUE;
-#else
- written = vec[i].len;
- ok = !s->ssl || SSL_write_ex(s->ssl, vec[i].base, vec[i].len,
- &written);
-#endif
if(ok) {
/* As OpenSSL buffers the data, we count this as acknowledged
* from nghttp3's point of view */
@@ -1766,7 +1834,7 @@ static ssize_t h3_stream_open(struct Curl_cfilter *cf,
*err = cf_osslq_stream_open(&stream->s, ctx->tls.ossl.ssl, 0,
&ctx->stream_bufcp, data);
if(*err) {
- failf(data, "can't get bidi streams");
+ failf(data, "cannot get bidi streams");
*err = CURLE_SEND_ERROR;
goto out;
}
@@ -1867,21 +1935,6 @@ static ssize_t cf_osslq_send(struct Curl_cfilter *cf, struct Curl_easy *data,
}
stream = H3_STREAM_CTX(ctx, data);
}
- else if(stream->upload_blocked_len) {
- /* the data in `buf` has already been submitted or added to the
- * buffers, but have been EAGAINed on the last invocation. */
- DEBUGASSERT(len >= stream->upload_blocked_len);
- if(len < stream->upload_blocked_len) {
- /* Did we get called again with a smaller `len`? This should not
- * happen. We are not prepared to handle that. */
- failf(data, "HTTP/3 send again with decreased length");
- *err = CURLE_HTTP3;
- nwritten = -1;
- goto out;
- }
- nwritten = (ssize_t)stream->upload_blocked_len;
- stream->upload_blocked_len = 0;
- }
else if(stream->closed) {
if(stream->resp_hds_complete) {
/* Server decided to close the stream after having sent us a final
@@ -1919,18 +1972,6 @@ static ssize_t cf_osslq_send(struct Curl_cfilter *cf, struct Curl_easy *data,
nwritten = -1;
}
- if(stream && nwritten > 0 && stream->sendbuf_len_in_flight) {
- /* We have unacknowledged DATA and cannot report success to our
- * caller. Instead we EAGAIN and remember how much we have already
- * "written" into our various internal connection buffers. */
- stream->upload_blocked_len = nwritten;
- CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] cf_send(len=%zu), "
- "%zu bytes in flight -> EGAIN", stream->s.id, len,
- stream->sendbuf_len_in_flight);
- *err = CURLE_AGAIN;
- nwritten = -1;
- }
-
out:
result = check_and_set_expiry(cf, data);
CURL_TRC_CF(data, cf, "[%" CURL_PRId64 "] cf_send(len=%zu) -> %zd, %d",
@@ -2090,7 +2131,8 @@ static CURLcode cf_osslq_data_event(struct Curl_cfilter *cf,
struct h3_stream_ctx *stream = H3_STREAM_CTX(ctx, data);
if(stream && !stream->send_closed) {
stream->send_closed = TRUE;
- stream->upload_left = Curl_bufq_len(&stream->sendbuf);
+ stream->upload_left = Curl_bufq_len(&stream->sendbuf) -
+ stream->sendbuf_len_in_flight;
(void)nghttp3_conn_resume_stream(ctx->h3.conn, stream->s.id);
}
break;
@@ -2149,8 +2191,8 @@ static bool cf_osslq_conn_is_alive(struct Curl_cfilter *cf,
alive = TRUE;
if(*input_pending) {
CURLcode result;
- /* This happens before we've sent off a request and the connection is
- not in use by any other transfer, there shouldn't be any data here,
+ /* This happens before we have sent off a request and the connection is
+ not in use by any other transfer, there should not be any data here,
only "protocol frames" */
*input_pending = FALSE;
result = cf_progress_ingress(cf, data);
@@ -2189,6 +2231,10 @@ static void cf_osslq_adjust_pollset(struct Curl_cfilter *cf,
SSL_net_read_desired(ctx->tls.ossl.ssl),
SSL_net_write_desired(ctx->tls.ossl.ssl));
}
+ else if(ctx->need_recv || ctx->need_send) {
+ Curl_pollset_set(data, ps, ctx->q.sockfd,
+ ctx->need_recv, ctx->need_send);
+ }
}
}
@@ -2252,6 +2298,7 @@ struct Curl_cftype Curl_cft_http3 = {
cf_osslq_destroy,
cf_osslq_connect,
cf_osslq_close,
+ cf_osslq_shutdown,
Curl_cf_def_get_host,
cf_osslq_adjust_pollset,
cf_osslq_data_pending,
diff --git a/libs/libcurl/src/vquic/curl_quiche.c b/libs/libcurl/src/vquic/curl_quiche.c
index 0a2fab2213..66e3592bb2 100644
--- a/libs/libcurl/src/vquic/curl_quiche.c
+++ b/libs/libcurl/src/vquic/curl_quiche.c
@@ -64,11 +64,10 @@
#define H3_STREAM_WINDOW_SIZE (128 * 1024)
#define H3_STREAM_CHUNK_SIZE (16 * 1024)
-/* The pool keeps spares around and half of a full stream windows
- * seems good. More does not seem to improve performance.
- * The benefit of the pool is that stream buffer to not keep
- * spares. So memory consumption goes down when streams run empty,
- * have a large upload done, etc. */
+/* The pool keeps spares around and half of a full stream window seems good.
+ * More does not seem to improve performance. The benefit of the pool is that
+ * stream buffers do not keep spares. Memory consumption goes down when streams
+ * run empty, have a large upload done, etc. */
#define H3_STREAM_POOL_SPARES \
(H3_STREAM_WINDOW_SIZE / H3_STREAM_CHUNK_SIZE ) / 2
/* Receive and Send max number of chunks just follows from the
@@ -103,6 +102,7 @@ struct cf_quiche_ctx {
curl_off_t data_recvd;
BIT(goaway); /* got GOAWAY from server */
BIT(x509_store_setup); /* if x509 store has been set up */
+ BIT(shutdown_started); /* queued shutdown packets */
};
#ifdef DEBUG_QUICHE
@@ -1120,7 +1120,7 @@ out:
nwritten = -1;
}
CURL_TRC_CF(data, cf, "[%" CURL_PRIu64 "] cf_send(len=%zu) -> %zd, %d",
- stream? stream->id : -1, len, nwritten, *err);
+ stream? stream->id : (uint64_t)~0, len, nwritten, *err);
return nwritten;
}
@@ -1271,7 +1271,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
ctx->cfg = quiche_config_new(QUICHE_PROTOCOL_VERSION);
if(!ctx->cfg) {
- failf(data, "can't create quiche config");
+ failf(data, "cannot create quiche config");
return CURLE_FAILED_INIT;
}
quiche_config_enable_pacing(ctx->cfg, false);
@@ -1322,7 +1322,7 @@ static CURLcode cf_connect_start(struct Curl_cfilter *cf,
&sockaddr->sa_addr, sockaddr->addrlen,
ctx->cfg, ctx->tls.ossl.ssl, false);
if(!ctx->qconn) {
- failf(data, "can't create quiche connection");
+ failf(data, "cannot create quiche connection");
return CURLE_OUT_OF_MEMORY;
}
@@ -1464,18 +1464,60 @@ out:
return result;
}
+static CURLcode cf_quiche_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data, bool *done)
+{
+ struct cf_quiche_ctx *ctx = cf->ctx;
+ CURLcode result = CURLE_OK;
+
+ if(cf->shutdown || !ctx || !ctx->qconn) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ *done = FALSE;
+ if(!ctx->shutdown_started) {
+ int err;
+
+ ctx->shutdown_started = TRUE;
+ vquic_ctx_update_time(&ctx->q);
+ err = quiche_conn_close(ctx->qconn, TRUE, 0, NULL, 0);
+ if(err) {
+ CURL_TRC_CF(data, cf, "error %d adding shutdown packet, "
+ "aborting shutdown", err);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ }
+
+ if(!Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ CURL_TRC_CF(data, cf, "shutdown, flushing sendbuf");
+ result = cf_flush_egress(cf, data);
+ if(result)
+ goto out;
+ }
+
+ if(Curl_bufq_is_empty(&ctx->q.sendbuf)) {
+ /* sent everything, quiche does not seem to support a graceful
+     * shutdown waiting for a reply, so we are done. */
+ CURL_TRC_CF(data, cf, "shutdown completely sent off, done");
+ *done = TRUE;
+ }
+ else {
+ CURL_TRC_CF(data, cf, "shutdown sending blocked");
+ }
+
+out:
+ return result;
+}
+
static void cf_quiche_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct cf_quiche_ctx *ctx = cf->ctx;
if(ctx) {
- if(ctx->qconn) {
- vquic_ctx_update_time(&ctx->q);
- (void)quiche_conn_close(ctx->qconn, TRUE, 0, NULL, 0);
- /* flushing the egress is not a failsafe way to deliver all the
- outstanding packets, but we also don't want to get stuck here... */
- (void)cf_flush_egress(cf, data);
- }
+ bool done;
+ (void)cf_quiche_shutdown(cf, data, &done);
cf_quiche_ctx_clear(ctx);
}
}
@@ -1559,8 +1601,8 @@ static bool cf_quiche_conn_is_alive(struct Curl_cfilter *cf,
return FALSE;
if(*input_pending) {
- /* This happens before we've sent off a request and the connection is
- not in use by any other transfer, there shouldn't be any data here,
+ /* This happens before we have sent off a request and the connection is
+ not in use by any other transfer, there should not be any data here,
only "protocol frames" */
*input_pending = FALSE;
if(cf_process_ingress(cf, data))
@@ -1580,6 +1622,7 @@ struct Curl_cftype Curl_cft_http3 = {
cf_quiche_destroy,
cf_quiche_connect,
cf_quiche_close,
+ cf_quiche_shutdown,
Curl_cf_def_get_host,
cf_quiche_adjust_pollset,
cf_quiche_data_pending,
diff --git a/libs/libcurl/src/vquic/vquic-tls.c b/libs/libcurl/src/vquic/vquic-tls.c
index df8a4f5c19..d3ddce2c53 100644
--- a/libs/libcurl/src/vquic/vquic-tls.c
+++ b/libs/libcurl/src/vquic/vquic-tls.c
@@ -76,7 +76,7 @@ static void keylog_callback(const WOLFSSL *ssl, const char *line)
}
#endif
-static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
+static CURLcode Curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
struct Curl_cfilter *cf,
struct Curl_easy *data,
Curl_vquic_tls_ctx_setup *cb_setup,
@@ -91,8 +91,8 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
goto out;
}
- ctx->ssl_ctx = wolfSSL_CTX_new(wolfTLSv1_3_client_method());
- if(!ctx->ssl_ctx) {
+ ctx->wssl.ctx = wolfSSL_CTX_new(wolfTLSv1_3_client_method());
+ if(!ctx->wssl.ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
}
@@ -103,9 +103,9 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
goto out;
}
- wolfSSL_CTX_set_default_verify_paths(ctx->ssl_ctx);
+ wolfSSL_CTX_set_default_verify_paths(ctx->wssl.ctx);
- if(wolfSSL_CTX_set_cipher_list(ctx->ssl_ctx, conn_config->cipher_list13 ?
+ if(wolfSSL_CTX_set_cipher_list(ctx->wssl.ctx, conn_config->cipher_list13 ?
conn_config->cipher_list13 :
QUIC_CIPHERS) != 1) {
char error_buffer[256];
@@ -115,7 +115,7 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
goto out;
}
- if(wolfSSL_CTX_set1_groups_list(ctx->ssl_ctx, conn_config->curves ?
+ if(wolfSSL_CTX_set1_groups_list(ctx->wssl.ctx, conn_config->curves ?
conn_config->curves :
(char *)QUIC_GROUPS) != 1) {
failf(data, "wolfSSL failed to set curves");
@@ -127,7 +127,7 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
Curl_tls_keylog_open();
if(Curl_tls_keylog_enabled()) {
#if defined(HAVE_SECRET_CALLBACK)
- wolfSSL_CTX_set_keylog_callback(ctx->ssl_ctx, keylog_callback);
+ wolfSSL_CTX_set_keylog_callback(ctx->wssl.ctx, keylog_callback);
#else
failf(data, "wolfSSL was built without keylog callback");
result = CURLE_NOT_BUILT_IN;
@@ -139,12 +139,12 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
const char * const ssl_cafile = conn_config->CAfile;
const char * const ssl_capath = conn_config->CApath;
- wolfSSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER, NULL);
+ wolfSSL_CTX_set_verify(ctx->wssl.ctx, SSL_VERIFY_PEER, NULL);
if(ssl_cafile || ssl_capath) {
/* tell wolfSSL where to find CA certificates that are used to verify
the server's certificate. */
int rc =
- wolfSSL_CTX_load_verify_locations_ex(ctx->ssl_ctx, ssl_cafile,
+ wolfSSL_CTX_load_verify_locations_ex(ctx->wssl.ctx, ssl_cafile,
ssl_capath,
WOLFSSL_LOAD_FLAG_IGNORE_ERR);
if(SSL_SUCCESS != rc) {
@@ -161,20 +161,20 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
}
#ifdef CURL_CA_FALLBACK
else {
- /* verifying the peer without any CA certificates won't work so
+ /* verifying the peer without any CA certificates will not work so
use wolfssl's built-in default as fallback */
- wolfSSL_CTX_set_default_verify_paths(ctx->ssl_ctx);
+ wolfSSL_CTX_set_default_verify_paths(ctx->wssl.ctx);
}
#endif
}
else {
- wolfSSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_NONE, NULL);
+ wolfSSL_CTX_set_verify(ctx->wssl.ctx, SSL_VERIFY_NONE, NULL);
}
/* give application a chance to interfere with SSL set up. */
if(data->set.ssl.fsslctx) {
Curl_set_in_callback(data, true);
- result = (*data->set.ssl.fsslctx)(data, ctx->ssl_ctx,
+ result = (*data->set.ssl.fsslctx)(data, ctx->wssl.ctx,
data->set.ssl.fsslctxp);
Curl_set_in_callback(data, false);
if(result) {
@@ -185,36 +185,36 @@ static CURLcode curl_wssl_init_ctx(struct curl_tls_ctx *ctx,
result = CURLE_OK;
out:
- if(result && ctx->ssl_ctx) {
- SSL_CTX_free(ctx->ssl_ctx);
- ctx->ssl_ctx = NULL;
+ if(result && ctx->wssl.ctx) {
+ SSL_CTX_free(ctx->wssl.ctx);
+ ctx->wssl.ctx = NULL;
}
return result;
}
/** SSL callbacks ***/
-static CURLcode curl_wssl_init_ssl(struct curl_tls_ctx *ctx,
+static CURLcode Curl_wssl_init_ssl(struct curl_tls_ctx *ctx,
struct Curl_easy *data,
struct ssl_peer *peer,
const char *alpn, size_t alpn_len,
void *user_data)
{
(void)data;
- DEBUGASSERT(!ctx->ssl);
- DEBUGASSERT(ctx->ssl_ctx);
- ctx->ssl = wolfSSL_new(ctx->ssl_ctx);
+ DEBUGASSERT(!ctx->wssl.handle);
+ DEBUGASSERT(ctx->wssl.ctx);
+ ctx->wssl.handle = wolfSSL_new(ctx->wssl.ctx);
- wolfSSL_set_app_data(ctx->ssl, user_data);
- wolfSSL_set_connect_state(ctx->ssl);
- wolfSSL_set_quic_use_legacy_codepoint(ctx->ssl, 0);
+ wolfSSL_set_app_data(ctx->wssl.handle, user_data);
+ wolfSSL_set_connect_state(ctx->wssl.handle);
+ wolfSSL_set_quic_use_legacy_codepoint(ctx->wssl.handle, 0);
if(alpn)
- wolfSSL_set_alpn_protos(ctx->ssl, (const unsigned char *)alpn,
- (int)alpn_len);
+ wolfSSL_set_alpn_protos(ctx->wssl.handle, (const unsigned char *)alpn,
+ (unsigned int)alpn_len);
if(peer->sni) {
- wolfSSL_UseSNI(ctx->ssl, WOLFSSL_SNI_HOST_NAME,
+ wolfSSL_UseSNI(ctx->wssl.handle, WOLFSSL_SNI_HOST_NAME,
peer->sni, (unsigned short)strlen(peer->sni));
}
@@ -243,11 +243,11 @@ CURLcode Curl_vquic_tls_init(struct curl_tls_ctx *ctx,
(const unsigned char *)alpn, alpn_len,
cb_setup, cb_user_data, ssl_user_data);
#elif defined(USE_WOLFSSL)
- result = curl_wssl_init_ctx(ctx, cf, data, cb_setup, cb_user_data);
+ result = Curl_wssl_init_ctx(ctx, cf, data, cb_setup, cb_user_data);
if(result)
return result;
- return curl_wssl_init_ssl(ctx, data, peer, alpn, alpn_len, ssl_user_data);
+ return Curl_wssl_init_ssl(ctx, data, peer, alpn, alpn_len, ssl_user_data);
#else
#error "no TLS lib in used, should not happen"
return CURLE_FAILED_INIT;
@@ -262,15 +262,14 @@ void Curl_vquic_tls_cleanup(struct curl_tls_ctx *ctx)
if(ctx->ossl.ssl_ctx)
SSL_CTX_free(ctx->ossl.ssl_ctx);
#elif defined(USE_GNUTLS)
- if(ctx->gtls.cred)
- gnutls_certificate_free_credentials(ctx->gtls.cred);
if(ctx->gtls.session)
gnutls_deinit(ctx->gtls.session);
+ Curl_gtls_shared_creds_free(&ctx->gtls.shared_creds);
#elif defined(USE_WOLFSSL)
- if(ctx->ssl)
- wolfSSL_free(ctx->ssl);
- if(ctx->ssl_ctx)
- wolfSSL_CTX_free(ctx->ssl_ctx);
+ if(ctx->wssl.handle)
+ wolfSSL_free(ctx->wssl.handle);
+ if(ctx->wssl.ctx)
+ wolfSSL_CTX_free(ctx->wssl.ctx);
#endif
memset(ctx, 0, sizeof(*ctx));
}
@@ -286,8 +285,14 @@ CURLcode Curl_vquic_tls_before_recv(struct curl_tls_ctx *ctx,
return result;
ctx->ossl.x509_store_setup = TRUE;
}
+#elif defined(USE_WOLFSSL)
+ if(!ctx->wssl.x509_store_setup) {
+ CURLcode result = Curl_wssl_setup_x509_store(cf, data, &ctx->wssl);
+ if(result)
+ return result;
+ }
#elif defined(USE_GNUTLS)
- if(!ctx->gtls.trust_setup) {
+ if(!ctx->gtls.shared_creds->trust_setup) {
CURLcode result = Curl_gtls_client_trust_setup(cf, data, &ctx->gtls);
if(result)
return result;
@@ -325,7 +330,7 @@ CURLcode Curl_vquic_tls_verify_peer(struct curl_tls_ctx *ctx,
(void)data;
if(conn_config->verifyhost) {
if(peer->sni) {
- WOLFSSL_X509* cert = wolfSSL_get_peer_certificate(ctx->ssl);
+ WOLFSSL_X509* cert = wolfSSL_get_peer_certificate(ctx->wssl.handle);
if(wolfSSL_X509_check_host(cert, peer->sni, strlen(peer->sni), 0, NULL)
== WOLFSSL_FAILURE) {
result = CURLE_PEER_FAILED_VERIFICATION;
diff --git a/libs/libcurl/src/vquic/vquic-tls.h b/libs/libcurl/src/vquic/vquic-tls.h
index e7d33d4b46..74db0e0ada 100644
--- a/libs/libcurl/src/vquic/vquic-tls.h
+++ b/libs/libcurl/src/vquic/vquic-tls.h
@@ -31,14 +31,15 @@
#if defined(USE_HTTP3) && \
(defined(USE_OPENSSL) || defined(USE_GNUTLS) || defined(USE_WOLFSSL))
+#include "vtls/wolfssl.h"
+
struct curl_tls_ctx {
#ifdef USE_OPENSSL
struct ossl_ctx ossl;
#elif defined(USE_GNUTLS)
struct gtls_ctx gtls;
#elif defined(USE_WOLFSSL)
- WOLFSSL_CTX *ssl_ctx;
- WOLFSSL *ssl;
+ struct wolfssl_ctx wssl;
#endif
};
diff --git a/libs/libcurl/src/vquic/vquic.c b/libs/libcurl/src/vquic/vquic.c
index feff3395cc..13d9929ee2 100644
--- a/libs/libcurl/src/vquic/vquic.c
+++ b/libs/libcurl/src/vquic/vquic.c
@@ -36,6 +36,9 @@
#include "curl_setup.h"
+#ifdef HAVE_NETINET_UDP_H
+#include <netinet/udp.h>
+#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
@@ -329,6 +332,36 @@ CURLcode vquic_send_tail_split(struct Curl_cfilter *cf, struct Curl_easy *data,
return vquic_flush(cf, data, qctx);
}
+#if defined(HAVE_SENDMMSG) || defined(HAVE_SENDMSG)
+static size_t msghdr_get_udp_gro(struct msghdr *msg)
+{
+ int gso_size = 0;
+#if defined(__linux__) && defined(UDP_GRO)
+ struct cmsghdr *cmsg;
+
+ /* Workaround musl CMSG_NXTHDR issue */
+#ifndef __GLIBC__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wsign-compare"
+#pragma clang diagnostic ignored "-Wcast-align"
+#endif
+ for(cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+#ifndef __GLIBC__
+#pragma clang diagnostic pop
+#endif
+ if(cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) {
+ memcpy(&gso_size, CMSG_DATA(cmsg), sizeof(gso_size));
+
+ break;
+ }
+ }
+#endif
+ (void)msg;
+
+ return (size_t)gso_size;
+}
+#endif
+
#ifdef HAVE_SENDMMSG
static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
struct Curl_easy *data,
@@ -339,12 +372,16 @@ static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
#define MMSG_NUM 64
struct iovec msg_iov[MMSG_NUM];
struct mmsghdr mmsg[MMSG_NUM];
+ uint8_t msg_ctrl[MMSG_NUM * CMSG_SPACE(sizeof(uint16_t))];
uint8_t bufs[MMSG_NUM][2*1024];
struct sockaddr_storage remote_addr[MMSG_NUM];
size_t total_nread, pkts;
int mcount, i, n;
char errstr[STRERROR_LEN];
CURLcode result = CURLE_OK;
+ size_t gso_size;
+ size_t pktlen;
+ size_t offset, to;
DEBUGASSERT(max_pkts > 0);
pkts = 0;
@@ -359,6 +396,8 @@ static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
mmsg[i].msg_hdr.msg_iovlen = 1;
mmsg[i].msg_hdr.msg_name = &remote_addr[i];
mmsg[i].msg_hdr.msg_namelen = sizeof(remote_addr[i]);
+ mmsg[i].msg_hdr.msg_control = &msg_ctrl[i];
+ mmsg[i].msg_hdr.msg_controllen = CMSG_SPACE(sizeof(uint16_t));
}
while((mcount = recvmmsg(qctx->sockfd, mmsg, n, 0, NULL)) == -1 &&
@@ -385,14 +424,30 @@ static CURLcode recvmmsg_packets(struct Curl_cfilter *cf,
}
CURL_TRC_CF(data, cf, "recvmmsg() -> %d packets", mcount);
- pkts += mcount;
for(i = 0; i < mcount; ++i) {
total_nread += mmsg[i].msg_len;
- result = recv_cb(bufs[i], mmsg[i].msg_len,
- mmsg[i].msg_hdr.msg_name, mmsg[i].msg_hdr.msg_namelen,
- 0, userp);
- if(result)
- goto out;
+
+ gso_size = msghdr_get_udp_gro(&mmsg[i].msg_hdr);
+ if(gso_size == 0) {
+ gso_size = mmsg[i].msg_len;
+ }
+
+ for(offset = 0; offset < mmsg[i].msg_len; offset = to) {
+ ++pkts;
+
+ to = offset + gso_size;
+ if(to > mmsg[i].msg_len) {
+ pktlen = mmsg[i].msg_len - offset;
+ }
+ else {
+ pktlen = gso_size;
+ }
+
+ result = recv_cb(bufs[i] + offset, pktlen, mmsg[i].msg_hdr.msg_name,
+ mmsg[i].msg_hdr.msg_namelen, 0, userp);
+ if(result)
+ goto out;
+ }
}
}
@@ -418,6 +473,10 @@ static CURLcode recvmsg_packets(struct Curl_cfilter *cf,
ssize_t nread;
char errstr[STRERROR_LEN];
CURLcode result = CURLE_OK;
+ uint8_t msg_ctrl[CMSG_SPACE(sizeof(uint16_t))];
+ size_t gso_size;
+ size_t pktlen;
+ size_t offset, to;
msg_iov.iov_base = buf;
msg_iov.iov_len = (int)sizeof(buf);
@@ -425,11 +484,13 @@ static CURLcode recvmsg_packets(struct Curl_cfilter *cf,
memset(&msg, 0, sizeof(msg));
msg.msg_iov = &msg_iov;
msg.msg_iovlen = 1;
+ msg.msg_control = msg_ctrl;
DEBUGASSERT(max_pkts > 0);
for(pkts = 0, total_nread = 0; pkts < max_pkts;) {
msg.msg_name = &remote_addr;
msg.msg_namelen = sizeof(remote_addr);
+ msg.msg_controllen = sizeof(msg_ctrl);
while((nread = recvmsg(qctx->sockfd, &msg, 0)) == -1 &&
SOCKERRNO == EINTR)
;
@@ -452,12 +513,29 @@ static CURLcode recvmsg_packets(struct Curl_cfilter *cf,
goto out;
}
- ++pkts;
total_nread += (size_t)nread;
- result = recv_cb(buf, (size_t)nread, msg.msg_name, msg.msg_namelen,
- 0, userp);
- if(result)
- goto out;
+
+ gso_size = msghdr_get_udp_gro(&msg);
+ if(gso_size == 0) {
+ gso_size = (size_t)nread;
+ }
+
+ for(offset = 0; offset < (size_t)nread; offset = to) {
+ ++pkts;
+
+ to = offset + gso_size;
+ if(to > (size_t)nread) {
+ pktlen = (size_t)nread - offset;
+ }
+ else {
+ pktlen = gso_size;
+ }
+
+ result =
+ recv_cb(buf + offset, pktlen, msg.msg_name, msg.msg_namelen, 0, userp);
+ if(result)
+ goto out;
+ }
}
out:
@@ -655,7 +733,7 @@ CURLcode Curl_conn_may_http3(struct Curl_easy *data,
return CURLE_URL_MALFORMAT;
}
if(conn->bits.httpproxy && conn->bits.tunnel_proxy) {
- failf(data, "HTTP/3 is not supported over a HTTP proxy");
+ failf(data, "HTTP/3 is not supported over an HTTP proxy");
return CURLE_URL_MALFORMAT;
}
#endif
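Note on the recvmmsg()/recvmsg() changes above: they add support for UDP Generic Receive Offload (GRO). With GRO, the kernel may hand back several equally sized QUIC datagrams coalesced into one buffer and report the segment size through a UDP_GRO control message; msghdr_get_udp_gro() reads that size and the receive loops then slice the buffer back into individual packets before invoking the callback. Below is a minimal, self-contained sketch of that slicing step only; split_gro_buffer and pkt_cb are illustrative names, not libcurl symbols.

#include <stddef.h>

/* Sketch: split a GRO-coalesced UDP buffer into individual packets.
 * 'gso_size' is the segment size reported via the UDP_GRO cmsg; a value
 * of 0 means the buffer holds a single datagram. */
typedef int (*pkt_cb)(const unsigned char *pkt, size_t pktlen, void *userp);

static int split_gro_buffer(const unsigned char *buf, size_t nread,
                            size_t gso_size, pkt_cb cb, void *userp)
{
  size_t offset, to, pktlen;

  if(!gso_size)
    gso_size = nread;                    /* not coalesced: one packet */

  for(offset = 0; offset < nread; offset = to) {
    to = offset + gso_size;
    /* the last segment may be shorter than gso_size */
    pktlen = (to > nread) ? (nread - offset) : gso_size;
    if(cb(buf + offset, pktlen, userp))
      return 1;                          /* stop on callback error */
  }
  return 0;
}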
diff --git a/libs/libcurl/src/vssh/libssh.c b/libs/libcurl/src/vssh/libssh.c
index 9ba38ac257..8cebb44ac1 100644
--- a/libs/libcurl/src/vssh/libssh.c
+++ b/libs/libcurl/src/vssh/libssh.c
@@ -388,28 +388,25 @@ static int myssh_is_known(struct Curl_easy *data)
goto cleanup;
}
- if(data->set.ssl.primary.verifyhost != TRUE) {
- rc = SSH_OK;
- goto cleanup;
- }
+ if(data->set.str[STRING_SSH_KNOWNHOSTS]) {
#if LIBSSH_VERSION_INT >= SSH_VERSION_INT(0,9,0)
- /* Get the known_key from the known hosts file */
- vstate = ssh_session_get_known_hosts_entry(sshc->ssh_session,
- &knownhostsentry);
-
- /* Case an entry was found in a known hosts file */
- if(knownhostsentry) {
- if(knownhostsentry->publickey) {
- rc = ssh_pki_export_pubkey_base64(knownhostsentry->publickey,
- &known_base64);
- if(rc != SSH_OK) {
- goto cleanup;
- }
- knownkey.key = known_base64;
- knownkey.len = strlen(known_base64);
+ /* Get the known_key from the known hosts file */
+ vstate = ssh_session_get_known_hosts_entry(sshc->ssh_session,
+ &knownhostsentry);
+
+ /* Case an entry was found in a known hosts file */
+ if(knownhostsentry) {
+ if(knownhostsentry->publickey) {
+ rc = ssh_pki_export_pubkey_base64(knownhostsentry->publickey,
+ &known_base64);
+ if(rc != SSH_OK) {
+ goto cleanup;
+ }
+ knownkey.key = known_base64;
+ knownkey.len = strlen(known_base64);
- switch(ssh_key_type(knownhostsentry->publickey)) {
+ switch(ssh_key_type(knownhostsentry->publickey)) {
case SSH_KEYTYPE_RSA:
knownkey.keytype = CURLKHTYPE_RSA;
break;
@@ -431,12 +428,12 @@ static int myssh_is_known(struct Curl_easy *data)
default:
rc = SSH_ERROR;
goto cleanup;
+ }
+ knownkeyp = &knownkey;
}
- knownkeyp = &knownkey;
}
- }
- switch(vstate) {
+ switch(vstate) {
case SSH_KNOWN_HOSTS_OK:
keymatch = CURLKHMATCH_OK;
break;
@@ -446,14 +443,14 @@ static int myssh_is_known(struct Curl_easy *data)
case SSH_KNOWN_HOSTS_ERROR:
keymatch = CURLKHMATCH_MISSING;
break;
- default:
+ default:
keymatch = CURLKHMATCH_MISMATCH;
break;
- }
+ }
#else
- vstate = ssh_is_server_known(sshc->ssh_session);
- switch(vstate) {
+ vstate = ssh_is_server_known(sshc->ssh_session);
+ switch(vstate) {
case SSH_SERVER_KNOWN_OK:
keymatch = CURLKHMATCH_OK;
break;
@@ -461,21 +458,21 @@ static int myssh_is_known(struct Curl_easy *data)
case SSH_SERVER_NOT_KNOWN:
keymatch = CURLKHMATCH_MISSING;
break;
- default:
+ default:
keymatch = CURLKHMATCH_MISMATCH;
break;
- }
+ }
#endif
- if(func) { /* use callback to determine action */
- rc = ssh_pki_export_pubkey_base64(pubkey, &found_base64);
- if(rc != SSH_OK)
- goto cleanup;
+ if(func) { /* use callback to determine action */
+ rc = ssh_pki_export_pubkey_base64(pubkey, &found_base64);
+ if(rc != SSH_OK)
+ goto cleanup;
- foundkey.key = found_base64;
- foundkey.len = strlen(found_base64);
+ foundkey.key = found_base64;
+ foundkey.len = strlen(found_base64);
- switch(ssh_key_type(pubkey)) {
+ switch(ssh_key_type(pubkey)) {
case SSH_KEYTYPE_RSA:
foundkey.keytype = CURLKHTYPE_RSA;
break;
@@ -501,15 +498,15 @@ static int myssh_is_known(struct Curl_easy *data)
default:
rc = SSH_ERROR;
goto cleanup;
- }
+ }
- Curl_set_in_callback(data, true);
- rc = func(data, knownkeyp, /* from the knownhosts file */
- &foundkey, /* from the remote host */
- keymatch, data->set.ssh_keyfunc_userp);
- Curl_set_in_callback(data, false);
+ Curl_set_in_callback(data, true);
+ rc = func(data, knownkeyp, /* from the knownhosts file */
+ &foundkey, /* from the remote host */
+ keymatch, data->set.ssh_keyfunc_userp);
+ Curl_set_in_callback(data, false);
- switch(rc) {
+ switch(rc) {
case CURLKHSTAT_FINE_ADD_TO_FILE:
#if LIBSSH_VERSION_INT >= SSH_VERSION_INT(0,8,0)
rc = ssh_session_update_known_hosts(sshc->ssh_session);
@@ -525,12 +522,13 @@ static int myssh_is_known(struct Curl_easy *data)
default: /* REJECT/DEFER */
rc = SSH_ERROR;
goto cleanup;
+ }
}
- }
- else {
- if(keymatch != CURLKHMATCH_OK) {
- rc = SSH_ERROR;
- goto cleanup;
+ else {
+ if(keymatch != CURLKHMATCH_OK) {
+ rc = SSH_ERROR;
+ goto cleanup;
+ }
}
}
rc = SSH_OK;
@@ -663,7 +661,7 @@ restart:
/*
* ssh_statemach_act() runs the SSH state machine as far as it can without
- * blocking and without reaching the end. The data the pointer 'block' points
+ * blocking and without reaching the end. The data the pointer 'block' points
* to will be set to TRUE if the libssh function returns SSH_AGAIN
* meaning it wants to be called again when the socket is ready
*/
@@ -677,7 +675,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
int rc = SSH_NO_ERROR, err;
int seekerr = CURL_SEEKFUNC_OK;
const char *err_msg;
- *block = 0; /* we're not blocking by default */
+ *block = 0; /* we are not blocking by default */
do {
@@ -742,7 +740,8 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
break;
}
- sshc->auth_methods = ssh_userauth_list(sshc->ssh_session, NULL);
+ sshc->auth_methods =
+ (unsigned int)ssh_userauth_list(sshc->ssh_session, NULL);
if(sshc->auth_methods)
infof(data, "SSH authentication methods available: %s%s%s%s",
sshc->auth_methods & SSH_AUTH_METHOD_PUBLICKEY ?
@@ -1308,7 +1307,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
failf(data, "Could not seek stream");
return CURLE_FTP_COULDNT_USE_REST;
}
- /* seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ /* seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@@ -1351,12 +1350,12 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
- /* store this original bitmask setup to use later on if we can't
+ /* store this original bitmask setup to use later on if we cannot
figure out a "real" bitmask */
sshc->orig_waitfor = data->req.keepon;
@@ -1365,7 +1364,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
with both accordingly */
data->state.select_bits = CURL_CSELECT_OUT;
- /* since we don't really wait for anything at this point, we want the
+ /* since we do not really wait for anything at this point, we want the
state machine to move on as soon as possible so we set a very short
timeout here */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
@@ -1404,7 +1403,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
++sshc->slash_pos;
if(rc < 0) {
/*
- * Abort if failure wasn't that the dir already exists or the
+ * Abort if failure was not that the dir already exists or the
* permission was denied (creation might succeed further down the
* path) - retry on unspecific FAILURE also
*/
@@ -1577,7 +1576,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->sftp_dir = NULL;
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
state(data, SSH_STOP);
break;
@@ -1611,9 +1610,9 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
!(attrs->flags & SSH_FILEXFER_ATTR_SIZE) ||
(attrs->size == 0)) {
/*
- * sftp_fstat didn't return an error, so maybe the server
- * just doesn't support stat()
- * OR the server doesn't return a file size with a stat()
+ * sftp_fstat did not return an error, so maybe the server
+ * just does not support stat()
+ * OR the server does not return a file size with a stat()
* OR file size is 0
*/
data->req.size = -1;
@@ -1686,7 +1685,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
/* We can resume if we can seek to the resume position */
if(data->state.resume_from) {
if(data->state.resume_from < 0) {
- /* We're supposed to download the last abs(from) bytes */
+ /* We are supposed to download the last abs(from) bytes */
if((curl_off_t)size < -data->state.resume_from) {
failf(data, "Offset (%"
CURL_FORMAT_CURL_OFF_T ") was beyond file size (%"
@@ -1722,12 +1721,12 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
/* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, data->req.size, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
@@ -1851,12 +1850,12 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
}
/* upload data */
- Curl_xfer_setup(data, -1, data->req.size, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
- /* store this original bitmask setup to use later on if we can't
+ /* store this original bitmask setup to use later on if we cannot
figure out a "real" bitmask */
sshc->orig_waitfor = data->req.keepon;
@@ -1895,7 +1894,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
/* download data */
bytecount = ssh_scp_request_get_size(sshc->scp_session);
data->req.maxdownload = (curl_off_t) bytecount;
- Curl_xfer_setup(data, FIRSTSOCKET, bytecount, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, bytecount, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
@@ -1946,7 +1945,7 @@ static CURLcode myssh_statemach_act(struct Curl_easy *data, bool *block)
FALLTHROUGH();
case SSH_SESSION_DISCONNECT:
- /* during weird times when we've been prematurely aborted, the channel
+ /* during weird times when we have been prematurely aborted, the channel
is still alive when we reach this state and we MUST kill the channel
properly first */
if(sshc->scp_session) {
@@ -2063,7 +2062,7 @@ static void myssh_block2waitfor(struct connectdata *conn, bool block)
{
struct ssh_conn *sshc = &conn->proto.sshc;
- /* If it didn't block, or nothing was returned by ssh_get_poll_flags
+ /* If it did not block, or nothing was returned by ssh_get_poll_flags
* have the original set */
conn->waitfor = sshc->orig_waitfor;
@@ -2358,7 +2357,7 @@ static CURLcode scp_disconnect(struct Curl_easy *data,
(void) dead_connection;
if(ssh->ssh_session) {
- /* only if there's a session still around to use! */
+ /* only if there is a session still around to use! */
state(data, SSH_SESSION_DISCONNECT);
@@ -2523,7 +2522,7 @@ static CURLcode sftp_disconnect(struct Curl_easy *data,
DEBUGF(infof(data, "SSH DISCONNECT starts now"));
if(conn->proto.sshc.ssh_session) {
- /* only if there's a session still around to use! */
+ /* only if there is a session still around to use! */
state(data, SSH_SFTP_SHUTDOWN);
result = myssh_block_statemach(data, TRUE);
}
@@ -2613,7 +2612,7 @@ static ssize_t sftp_recv(struct Curl_easy *data, int sockindex,
nread = sftp_async_read(conn->proto.sshc.sftp_file,
mem, (uint32_t)len,
- conn->proto.sshc.sftp_file_index);
+ (uint32_t)conn->proto.sshc.sftp_file_index);
myssh_block2waitfor(conn, (nread == SSH_AGAIN)?TRUE:FALSE);
@@ -2717,7 +2716,7 @@ static void sftp_quote(struct Curl_easy *data)
}
/*
- * SFTP is a binary protocol, so we don't send text commands
+ * SFTP is a binary protocol, so we do not send text commands
* to the server. Instead, we scan for commands used by
* OpenSSH's sftp program and call the appropriate libssh
* functions.
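The myssh_is_known() restructuring earlier in this file makes the known-hosts comparison conditional on the application having set a known-hosts file (data->set.str[STRING_SSH_KNOWNHOSTS]); the outcome is then reported through the CURLOPT_SSH_KEYFUNCTION callback, whose CURLKHSTAT_* return values drive the switch in that hunk. For orientation, a minimal application-side sketch of that wiring; the URL, path and acceptance policy are illustrative only.

#include <curl/curl.h>

/* Illustrative key callback: accept a matching key, add an unknown host
 * to the known-hosts file, reject everything else. */
static int keycb(CURL *easy, const struct curl_khkey *knownkey,
                 const struct curl_khkey *foundkey,
                 enum curl_khmatch match, void *clientp)
{
  (void)easy; (void)knownkey; (void)foundkey; (void)clientp;
  if(match == CURLKHMATCH_OK)
    return CURLKHSTAT_FINE;
  if(match == CURLKHMATCH_MISSING)
    return CURLKHSTAT_FINE_ADD_TO_FILE;
  return CURLKHSTAT_REJECT;
}

int sftp_fetch(void)
{
  CURL *curl = curl_easy_init();
  if(!curl)
    return 1;
  curl_easy_setopt(curl, CURLOPT_URL, "sftp://example.com/file.txt");
  curl_easy_setopt(curl, CURLOPT_SSH_KNOWNHOSTS, "/home/user/.ssh/known_hosts");
  curl_easy_setopt(curl, CURLOPT_SSH_KEYFUNCTION, keycb);
  curl_easy_setopt(curl, CURLOPT_SSH_KEYDATA, NULL);
  curl_easy_perform(curl);
  curl_easy_cleanup(curl);
  return 0;
}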
diff --git a/libs/libcurl/src/vssh/libssh2.c b/libs/libcurl/src/vssh/libssh2.c
index eff5bd8fe1..88d5469272 100644
--- a/libs/libcurl/src/vssh/libssh2.c
+++ b/libs/libcurl/src/vssh/libssh2.c
@@ -405,8 +405,8 @@ static int sshkeycallback(struct Curl_easy *easy,
#endif
/*
- * Earlier libssh2 versions didn't have the ability to seek to 64bit positions
- * with 32bit size_t.
+ * Earlier libssh2 versions did not have the ability to seek to 64-bit
+ * positions with 32-bit size_t.
*/
#ifdef HAVE_LIBSSH2_SFTP_SEEK64
#define SFTP_SEEK(x,y) libssh2_sftp_seek64(x, (libssh2_uint64_t)y)
@@ -415,27 +415,27 @@ static int sshkeycallback(struct Curl_easy *easy,
#endif
/*
- * Earlier libssh2 versions didn't do SCP properly beyond 32bit sizes on 32bit
- * architectures so we check of the necessary function is present.
+ * Earlier libssh2 versions did not do SCP properly beyond 32-bit sizes on
+ * 32-bit architectures so we check if the necessary function is present.
*/
#ifndef HAVE_LIBSSH2_SCP_SEND64
#define SCP_SEND(a,b,c,d) libssh2_scp_send_ex(a, b, (int)(c), (size_t)d, 0, 0)
#else
#define SCP_SEND(a,b,c,d) libssh2_scp_send64(a, b, (int)(c), \
- (libssh2_uint64_t)d, 0, 0)
+ (libssh2_int64_t)d, 0, 0)
#endif
/*
- * libssh2 1.2.8 fixed the problem with 32bit ints used for sockets on win64.
+ * libssh2 1.2.8 fixed the problem with 32-bit ints used for sockets on win64.
*/
#ifdef HAVE_LIBSSH2_SESSION_HANDSHAKE
#define session_startup(x,y) libssh2_session_handshake(x, y)
#else
#define session_startup(x,y) libssh2_session_startup(x, (int)y)
#endif
-static int convert_ssh2_keytype(int sshkeytype)
+static enum curl_khtype convert_ssh2_keytype(int sshkeytype)
{
- int keytype = CURLKHTYPE_UNKNOWN;
+ enum curl_khtype keytype = CURLKHTYPE_UNKNOWN;
switch(sshkeytype) {
case LIBSSH2_HOSTKEY_TYPE_RSA:
keytype = CURLKHTYPE_RSA;
@@ -476,7 +476,7 @@ static CURLcode ssh_knownhost(struct Curl_easy *data)
#ifdef HAVE_LIBSSH2_KNOWNHOST_API
if(data->set.str[STRING_SSH_KNOWNHOSTS]) {
- /* we're asked to verify the host against a file */
+ /* we are asked to verify the host against a file */
struct connectdata *conn = data->conn;
struct ssh_conn *sshc = &conn->proto.sshc;
struct libssh2_knownhost *host = NULL;
@@ -487,8 +487,8 @@ static CURLcode ssh_knownhost(struct Curl_easy *data)
if(remotekey) {
/*
- * A subject to figure out is what host name we need to pass in here.
- * What host name does OpenSSH store in its file if an IDN name is
+ * A subject to figure out is what hostname we need to pass in here.
+ * What hostname does OpenSSH store in its file if an IDN name is
* used?
*/
enum curl_khmatch keymatch;
@@ -526,7 +526,7 @@ static CURLcode ssh_knownhost(struct Curl_easy *data)
break;
#endif
default:
- infof(data, "unsupported key type, can't check knownhosts");
+ infof(data, "unsupported key type, cannot check knownhosts");
keybit = 0;
break;
}
@@ -600,7 +600,7 @@ static CURLcode ssh_knownhost(struct Curl_easy *data)
result = sshc->actualcode = CURLE_PEER_FAILED_VERIFICATION;
break;
case CURLKHSTAT_FINE_REPLACE:
- /* remove old host+key that doesn't match */
+ /* remove old host+key that does not match */
if(host)
libssh2_knownhost_del(sshc->kh, host);
FALLTHROUGH();
@@ -608,7 +608,7 @@ static CURLcode ssh_knownhost(struct Curl_easy *data)
case CURLKHSTAT_FINE_ADD_TO_FILE:
/* proceed */
if(keycheck != LIBSSH2_KNOWNHOST_CHECK_MATCH) {
- /* the found host+key didn't match but has been told to be fine
+ /* the found host+key did not match but has been told to be fine
anyway so we add it in memory */
int addrc = libssh2_knownhost_add(sshc->kh,
conn->host.name, NULL,
@@ -662,7 +662,7 @@ static CURLcode ssh_check_fingerprint(struct Curl_easy *data)
size_t b64_pos = 0;
#ifdef LIBSSH2_HOSTKEY_HASH_SHA256
- /* The fingerprint points to static storage (!), don't free() it. */
+ /* The fingerprint points to static storage (!), do not free() it. */
fingerprint = libssh2_hostkey_hash(sshc->ssh_session,
LIBSSH2_HOSTKEY_HASH_SHA256);
#else
@@ -742,7 +742,7 @@ static CURLcode ssh_check_fingerprint(struct Curl_easy *data)
LIBSSH2_HOSTKEY_HASH_MD5);
if(fingerprint) {
- /* The fingerprint points to static storage (!), don't free() it. */
+ /* The fingerprint points to static storage (!), do not free() it. */
int i;
for(i = 0; i < 16; i++) {
msnprintf(&md5buffer[i*2], 3, "%02x", (unsigned char) fingerprint[i]);
@@ -780,10 +780,10 @@ static CURLcode ssh_check_fingerprint(struct Curl_easy *data)
const char *remotekey = libssh2_session_hostkey(sshc->ssh_session,
&keylen, &sshkeytype);
if(remotekey) {
- int keytype = convert_ssh2_keytype(sshkeytype);
+ enum curl_khtype keytype = convert_ssh2_keytype(sshkeytype);
Curl_set_in_callback(data, true);
rc = data->set.ssh_hostkeyfunc(data->set.ssh_hostkeyfunc_userp,
- keytype, remotekey, keylen);
+ (int)keytype, remotekey, keylen);
Curl_set_in_callback(data, false);
if(rc!= CURLKHMATCH_OK) {
state(data, SSH_SESSION_FREE);
@@ -960,7 +960,7 @@ static CURLcode ssh_force_knownhost_key_type(struct Curl_easy *data)
/*
* ssh_statemach_act() runs the SSH state machine as far as it can without
- * blocking and without reaching the end. The data the pointer 'block' points
+ * blocking and without reaching the end. The data the pointer 'block' points
* to will be set to TRUE if the libssh2 function returns LIBSSH2_ERROR_EAGAIN
* meaning it wants to be called again when the socket is ready
*/
@@ -977,7 +977,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
unsigned long sftperr;
int seekerr = CURL_SEEKFUNC_OK;
size_t readdir_len;
- *block = 0; /* we're not blocking by default */
+ *block = 0; /* we are not blocking by default */
do {
switch(sshc->state) {
@@ -1037,7 +1037,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
* must never change it later. Thus, always specify the correct username
* here, even though the libssh2 docs kind of indicate that it should be
* possible to get a 'generic' list (not user-specific) of authentication
- * methods, presumably with a blank username. That won't work in my
+ * methods, presumably with a blank username. That will not work in my
* experience.
* So always specify it here.
*/
@@ -1440,7 +1440,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
if(sftperr)
result = sftp_libssh2_error_to_CURLE(sftperr);
else
- /* in this case, the error wasn't in the SFTP level but for example
+ /* in this case, the error was not in the SFTP level but for example
a time-out or similar */
result = CURLE_SSH;
sshc->actualcode = result;
@@ -1571,7 +1571,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
}
/*
- * SFTP is a binary protocol, so we don't send text commands
+ * SFTP is a binary protocol, so we do not send text commands
* to the server. Instead, we scan for commands used by
* OpenSSH's sftp program and call the appropriate libssh2
* functions.
@@ -1709,7 +1709,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
if(!strncasecompare(cmd, "chmod", 5)) {
/* Since chown and chgrp only set owner OR group but libssh2 wants to
* set them both at once, we need to obtain the current ownership
- * first. This takes an extra protocol round trip.
+ * first. This takes an extra protocol round trip.
*/
rc = libssh2_sftp_stat_ex(sshc->sftp_session, sshc->quote_path2,
curlx_uztoui(strlen(sshc->quote_path2)),
@@ -1786,7 +1786,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
}
#if SIZEOF_TIME_T > SIZEOF_LONG
if(date > 0xffffffff) {
- /* if 'long' can't old >32bit, this date cannot be sent */
+ /* if 'long' cannot hold >32bit, this date cannot be sent */
failf(data, "date overflow");
fail = TRUE;
}
@@ -1860,7 +1860,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
case SSH_SFTP_QUOTE_MKDIR:
rc = libssh2_sftp_mkdir_ex(sshc->sftp_session, sshc->quote_path1,
curlx_uztoui(strlen(sshc->quote_path1)),
- data->set.new_directory_perms);
+ (long)data->set.new_directory_perms);
if(rc == LIBSSH2_ERROR_EAGAIN) {
break;
}
@@ -2026,7 +2026,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
break;
}
if(rc == 0) {
- data->info.filetime = attrs.mtime;
+ data->info.filetime = (time_t)attrs.mtime;
}
state(data, SSH_SFTP_TRANS_INIT);
@@ -2090,7 +2090,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->sftp_handle =
libssh2_sftp_open_ex(sshc->sftp_session, sshp->path,
curlx_uztoui(strlen(sshp->path)),
- flags, data->set.new_file_perms,
+ flags, (long)data->set.new_file_perms,
LIBSSH2_SFTP_OPENFILE);
if(!sshc->sftp_handle) {
@@ -2160,7 +2160,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
failf(data, "Could not seek stream");
return CURLE_FTP_COULDNT_USE_REST;
}
- /* seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ /* seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@@ -2199,7 +2199,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
@@ -2209,7 +2209,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->actualcode = result;
}
else {
- /* store this original bitmask setup to use later on if we can't
+ /* store this original bitmask setup to use later on if we cannot
figure out a "real" bitmask */
sshc->orig_waitfor = data->req.keepon;
@@ -2218,7 +2218,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
with both accordingly */
data->state.select_bits = CURL_CSELECT_OUT;
- /* since we don't really wait for anything at this point, we want the
+ /* since we do not really wait for anything at this point, we want the
state machine to move on as soon as possible so we set a very short
timeout here */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
@@ -2254,7 +2254,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
/* 'mode' - parameter is preliminary - default to 0644 */
rc = libssh2_sftp_mkdir_ex(sshc->sftp_session, sshp->path,
curlx_uztoui(strlen(sshp->path)),
- data->set.new_directory_perms);
+ (long)data->set.new_directory_perms);
if(rc == LIBSSH2_ERROR_EAGAIN) {
break;
}
@@ -2262,7 +2262,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
++sshc->slash_pos;
if(rc < 0) {
/*
- * Abort if failure wasn't that the dir already exists or the
+ * Abort if failure was not that the dir already exists or the
* permission was denied (creation might succeed further down the
* path) - retry on unspecific FAILURE also
*/
@@ -2402,7 +2402,8 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
rc =
libssh2_sftp_symlink_ex(sshc->sftp_session,
Curl_dyn_ptr(&sshp->readdir_link),
- (int)Curl_dyn_len(&sshp->readdir_link),
+ (unsigned int)
+ Curl_dyn_len(&sshp->readdir_link),
sshp->readdir_filename,
PATH_MAX, LIBSSH2_SFTP_READLINK);
if(rc == LIBSSH2_ERROR_EAGAIN) {
@@ -2452,7 +2453,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
Curl_safefree(sshp->readdir_longentry);
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
state(data, SSH_STOP);
break;
@@ -2463,7 +2464,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->sftp_handle =
libssh2_sftp_open_ex(sshc->sftp_session, sshp->path,
curlx_uztoui(strlen(sshp->path)),
- LIBSSH2_FXF_READ, data->set.new_file_perms,
+ LIBSSH2_FXF_READ, (long)data->set.new_file_perms,
LIBSSH2_SFTP_OPENFILE);
if(!sshc->sftp_handle) {
if(libssh2_session_last_errno(sshc->ssh_session) ==
@@ -2496,9 +2497,9 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
!(attrs.flags & LIBSSH2_SFTP_ATTR_SIZE) ||
(attrs.filesize == 0)) {
/*
- * libssh2_sftp_open() didn't return an error, so maybe the server
- * just doesn't support stat()
- * OR the server doesn't return a file size with a stat()
+ * libssh2_sftp_open() did not return an error, so maybe the server
+ * just does not support stat()
+ * OR the server does not return a file size with a stat()
* OR file size is 0
*/
data->req.size = -1;
@@ -2563,7 +2564,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
/* We can resume if we can seek to the resume position */
if(data->state.resume_from) {
if(data->state.resume_from < 0) {
- /* We're supposed to download the last abs(from) bytes */
+ /* We are supposed to download the last abs(from) bytes */
if((curl_off_t)attrs.filesize < -data->state.resume_from) {
failf(data, "Offset (%"
CURL_FORMAT_CURL_OFF_T ") was beyond file size (%"
@@ -2594,12 +2595,12 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
/* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, data->req.size, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
@@ -2713,7 +2714,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
case SSH_SCP_UPLOAD_INIT:
/*
* libssh2 requires that the destination path is a full path that
- * includes the destination file and name OR ends in a "/" . If this is
+ * includes the destination file and name OR ends in a "/" . If this is
* not done the destination file will be named the same name as the last
* directory in the path.
*/
@@ -2745,7 +2746,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
/* upload data */
data->req.size = data->state.infilesize;
Curl_pgrsSetUploadSize(data, data->state.infilesize);
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
@@ -2755,7 +2756,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->actualcode = result;
}
else {
- /* store this original bitmask setup to use later on if we can't
+ /* store this original bitmask setup to use later on if we cannot
figure out a "real" bitmask */
sshc->orig_waitfor = data->req.keepon;
@@ -2816,7 +2817,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
/* download data */
bytecount = (curl_off_t)sb.st_size;
data->req.maxdownload = (curl_off_t)sb.st_size;
- Curl_xfer_setup(data, FIRSTSOCKET, bytecount, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, bytecount, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
@@ -2915,7 +2916,7 @@ static CURLcode ssh_statemach_act(struct Curl_easy *data, bool *block)
break;
case SSH_SESSION_DISCONNECT:
- /* during weird times when we've been prematurely aborted, the channel
+ /* during weird times when we have been prematurely aborted, the channel
is still alive when we reach this state and we MUST kill the channel
properly first */
if(sshc->ssh_channel) {
@@ -3071,7 +3072,7 @@ static int ssh_getsock(struct Curl_easy *data,
* When one of the libssh2 functions has returned LIBSSH2_ERROR_EAGAIN this
* function is used to figure out in what direction and stores this info so
* that the multi interface can take advantage of it. Make sure to call this
- * function in all cases so that when it _doesn't_ return EAGAIN we can
+ * function in all cases so that when it _does not_ return EAGAIN we can
* restore the default wait bits.
*/
static void ssh_block2waitfor(struct Curl_easy *data, bool block)
@@ -3088,7 +3089,7 @@ static void ssh_block2waitfor(struct Curl_easy *data, bool block)
}
}
if(!dir)
- /* It didn't block or libssh2 didn't reveal in which direction, put back
+ /* It did not block or libssh2 did not reveal in which direction, put back
the original set */
conn->waitfor = sshc->orig_waitfor;
}
@@ -3104,7 +3105,7 @@ static CURLcode ssh_multi_statemach(struct Curl_easy *data, bool *done)
do {
result = ssh_statemach_act(data, &block);
*done = (sshc->state == SSH_STOP) ? TRUE : FALSE;
- /* if there's no error, it isn't done and it didn't EWOULDBLOCK, then
+ /* if there is no error, it is not done and it did not EWOULDBLOCK, then
try again */
} while(!result && !*done && !block);
ssh_block2waitfor(data, block);
@@ -3290,7 +3291,7 @@ static CURLcode ssh_connect(struct Curl_easy *data, bool *done)
#if LIBSSH2_VERSION_NUM >= 0x010B00
if(data->set.server_response_timeout > 0) {
libssh2_session_set_read_timeout(sshc->ssh_session,
- data->set.server_response_timeout / 1000);
+ (long)(data->set.server_response_timeout / 1000));
}
#endif
@@ -3491,7 +3492,7 @@ static CURLcode scp_disconnect(struct Curl_easy *data,
(void) dead_connection;
if(sshc->ssh_session) {
- /* only if there's a session still around to use! */
+ /* only if there is a session still around to use! */
state(data, SSH_SESSION_DISCONNECT);
result = ssh_block_statemach(data, conn, TRUE);
}
@@ -3647,7 +3648,7 @@ static CURLcode sftp_disconnect(struct Curl_easy *data,
DEBUGF(infof(data, "SSH DISCONNECT starts now"));
if(sshc->ssh_session) {
- /* only if there's a session still around to use! */
+ /* only if there is a session still around to use! */
state(data, SSH_SFTP_SHUTDOWN);
result = ssh_block_statemach(data, conn, TRUE);
}
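Related to the convert_ssh2_keytype() change earlier in this file, which now returns enum curl_khtype before being cast to int for the CURLOPT_SSH_HOSTKEYFUNCTION callback: that callback is application code. A minimal sketch of such a callback follows; the acceptance policy is illustrative, and a real callback should compare key/keylen against a pinned host key rather than accept by type alone.

#include <curl/curl.h>

/* Illustrative host key callback for CURLOPT_SSH_HOSTKEYFUNCTION:
 * 'keytype' is one of the CURLKHTYPE_* values, 'key'/'keylen' hold the
 * raw host key. Return CURLKHMATCH_OK to accept, CURLKHMATCH_MISMATCH
 * to fail the connection. */
static int hostkeycb(void *clientp, int keytype, const char *key, size_t keylen)
{
  (void)clientp; (void)key; (void)keylen;
  switch(keytype) {
  case CURLKHTYPE_RSA:
  case CURLKHTYPE_ECDSA:
  case CURLKHTYPE_ED25519:
    return CURLKHMATCH_OK;
  default:
    return CURLKHMATCH_MISMATCH;
  }
}

/* usage:
 *   curl_easy_setopt(curl, CURLOPT_SSH_HOSTKEYFUNCTION, hostkeycb);
 *   curl_easy_setopt(curl, CURLOPT_SSH_HOSTKEYDATA, NULL);
 */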
diff --git a/libs/libcurl/src/vssh/ssh.h b/libs/libcurl/src/vssh/ssh.h
index ab9ac759d5..04ade52823 100644
--- a/libs/libcurl/src/vssh/ssh.h
+++ b/libs/libcurl/src/vssh/ssh.h
@@ -163,7 +163,7 @@ struct ssh_conn {
unsigned kbd_state; /* 0 or 1 */
ssh_key privkey;
ssh_key pubkey;
- int auth_methods;
+ unsigned int auth_methods;
ssh_session ssh_session;
ssh_scp scp_session;
sftp_session sftp_session;
@@ -243,10 +243,10 @@ struct ssh_conn {
#endif
#ifdef HAVE_LIBSSH2_VERSION
-/* get it run-time if possible */
+/* get it runtime if possible */
#define CURL_LIBSSH2_VERSION libssh2_version(0)
#else
-/* use build-time if run-time not possible */
+/* use build-time if runtime not possible */
#define CURL_LIBSSH2_VERSION LIBSSH2_VERSION
#endif
diff --git a/libs/libcurl/src/vssh/wolfssh.c b/libs/libcurl/src/vssh/wolfssh.c
index 58009faef0..bf1f16463c 100644
--- a/libs/libcurl/src/vssh/wolfssh.c
+++ b/libs/libcurl/src/vssh/wolfssh.c
@@ -400,7 +400,7 @@ static CURLcode wssh_connect(struct Curl_easy *data, bool *done)
rc = wolfSSH_SetUsername(sshc->ssh_session, conn->user);
if(rc != WS_SUCCESS) {
- failf(data, "wolfSSH failed to set user name");
+ failf(data, "wolfSSH failed to set username");
goto error;
}
@@ -433,7 +433,7 @@ error:
/*
* wssh_statemach_act() runs the SSH state machine as far as it can without
- * blocking and without reaching the end. The data the pointer 'block' points
+ * blocking and without reaching the end. The data the pointer 'block' points
* to will be set to TRUE if the wolfssh function returns EAGAIN meaning it
* wants to be called again when the socket is ready
*/
@@ -446,7 +446,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
struct SSHPROTO *sftp_scp = data->req.p.ssh;
WS_SFTPNAME *name;
int rc = 0;
- *block = FALSE; /* we're not blocking by default */
+ *block = FALSE; /* we are not blocking by default */
do {
switch(sshc->state) {
@@ -641,7 +641,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
failf(data, "Could not seek stream");
return CURLE_FTP_COULDNT_USE_REST;
}
- /* seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ /* seekerr == CURL_SEEKFUNC_CANTSEEK (cannot seek to offset) */
do {
char scratch[4*1024];
size_t readthisamountnow =
@@ -680,7 +680,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
Curl_pgrsSetUploadSize(data, data->state.infilesize);
}
/* upload data */
- Curl_xfer_setup(data, -1, -1, FALSE, FIRSTSOCKET);
+ Curl_xfer_setup1(data, CURL_XFER_SEND, -1, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->sockfd = conn->writesockfd;
@@ -690,7 +690,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
sshc->actualcode = result;
}
else {
- /* store this original bitmask setup to use later on if we can't
+ /* store this original bitmask setup to use later on if we cannot
figure out a "real" bitmask */
sshc->orig_waitfor = data->req.keepon;
@@ -699,7 +699,7 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
with both accordingly */
data->state.select_bits = CURL_CSELECT_OUT;
- /* since we don't really wait for anything at this point, we want the
+ /* since we do not really wait for anything at this point, we want the
state machine to move on as soon as possible so we set a very short
timeout here */
Curl_expire(data, 0, EXPIRE_RUN_NOW);
@@ -780,12 +780,12 @@ static CURLcode wssh_statemach_act(struct Curl_easy *data, bool *block)
/* Setup the actual download */
if(data->req.size == 0) {
/* no data to transfer */
- Curl_xfer_setup(data, -1, -1, FALSE, -1);
+ Curl_xfer_setup_nop(data);
infof(data, "File already completely downloaded");
state(data, SSH_STOP);
break;
}
- Curl_xfer_setup(data, FIRSTSOCKET, data->req.size, FALSE, -1);
+ Curl_xfer_setup1(data, CURL_XFER_RECV, data->req.size, FALSE);
/* not set by Curl_xfer_setup to preserve keepon bits */
conn->writesockfd = conn->sockfd;
@@ -908,7 +908,7 @@ static CURLcode wssh_multi_statemach(struct Curl_easy *data, bool *done)
do {
result = wssh_statemach_act(data, &block);
*done = (sshc->state == SSH_STOP) ? TRUE : FALSE;
- /* if there's no error, it isn't done and it didn't EWOULDBLOCK, then
+ /* if there is no error, it is not done and it did not EWOULDBLOCK, then
try again */
if(*done) {
DEBUGF(infof(data, "wssh_statemach_act says DONE"));
@@ -1121,7 +1121,7 @@ static CURLcode wsftp_disconnect(struct Curl_easy *data,
DEBUGF(infof(data, "SSH DISCONNECT starts now"));
if(conn->proto.sshc.ssh_session) {
- /* only if there's a session still around to use! */
+ /* only if there is a session still around to use! */
state(data, SSH_SFTP_SHUTDOWN);
result = wssh_block_statemach(data, TRUE);
}
diff --git a/libs/libcurl/src/vtls/bearssl.c b/libs/libcurl/src/vtls/bearssl.c
index f5f6ad3756..fdbb4f648a 100644
--- a/libs/libcurl/src/vtls/bearssl.c
+++ b/libs/libcurl/src/vtls/bearssl.c
@@ -63,6 +63,7 @@ struct bearssl_ssl_backend_data {
bool active;
/* size of pending write, yet to be flushed */
size_t pending_write;
+ BIT(sent_shutdown);
};
struct cafile_parser {
@@ -327,7 +328,7 @@ static unsigned x509_end_chain(const br_x509_class **ctx)
struct x509_context *x509 = (struct x509_context *)ctx;
if(!x509->verifypeer) {
- return br_x509_decoder_last_error(&x509->decoder);
+ return (unsigned)br_x509_decoder_last_error(&x509->decoder);
}
return x509->minimal.vtable->end_chain(&x509->minimal.vtable);
@@ -583,7 +584,7 @@ static CURLcode bearssl_connect_step1(struct Curl_cfilter *cf,
backend->x509.verifyhost = verifyhost;
br_ssl_engine_set_x509(&backend->ctx.eng, &backend->x509.vtable);
- if(ssl_config->primary.sessionid) {
+ if(ssl_config->primary.cache_session) {
void *session;
CURL_TRC_CF(data, cf, "connect_step1, check session cache");
@@ -722,6 +723,8 @@ static CURLcode bearssl_run_until(struct Curl_cfilter *cf,
ret = Curl_conn_cf_send(cf->next, data, (char *)buf, len, &result);
CURL_TRC_CF(data, cf, "ssl_send(len=%zu) -> %zd, %d", len, ret, result);
if(ret <= 0) {
+ if(result == CURLE_AGAIN)
+ connssl->io_need |= CURL_SSL_IO_NEED_SEND;
return result;
}
br_ssl_engine_sendrec_ack(&backend->ctx.eng, ret);
@@ -735,6 +738,8 @@ static CURLcode bearssl_run_until(struct Curl_cfilter *cf,
return CURLE_RECV_ERROR;
}
if(ret <= 0) {
+ if(result == CURLE_AGAIN)
+ connssl->io_need |= CURL_SSL_IO_NEED_RECV;
return result;
}
br_ssl_engine_recvrec_ack(&backend->ctx.eng, ret);
@@ -813,9 +818,7 @@ static CURLcode bearssl_connect_step3(struct Curl_cfilter *cf,
proto? strlen(proto) : 0);
}
- if(ssl_config->primary.sessionid) {
- bool incache;
- void *oldsession;
+ if(ssl_config->primary.cache_session) {
br_ssl_session_parameters *session;
session = malloc(sizeof(*session));
@@ -823,13 +826,8 @@ static CURLcode bearssl_connect_step3(struct Curl_cfilter *cf,
return CURLE_OUT_OF_MEMORY;
br_ssl_engine_get_session_parameters(&backend->ctx.eng, session);
Curl_ssl_sessionid_lock(data);
- incache = !(Curl_ssl_getsessionid(cf, data, &connssl->peer,
- &oldsession, NULL));
- if(incache)
- Curl_ssl_delsessionid(data, oldsession);
-
- ret = Curl_ssl_addsessionid(cf, data, &connssl->peer, session, 0,
- bearssl_session_free);
+ ret = Curl_ssl_set_sessionid(cf, data, &connssl->peer, session, 0,
+ bearssl_session_free);
Curl_ssl_sessionid_unlock(data);
if(ret)
return ret;
@@ -925,9 +923,7 @@ static CURLcode bearssl_connect_common(struct Curl_cfilter *cf,
return ret;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
/* check allowed time left */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -937,14 +933,13 @@ static CURLcode bearssl_connect_common(struct Curl_cfilter *cf,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
CURL_TRC_CF(data, cf, "connect_common, check socket");
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
@@ -975,11 +970,9 @@ static CURLcode bearssl_connect_common(struct Curl_cfilter *cf,
* before step2 has completed while ensuring that a client using select()
* or epoll() will always have a valid fdset to wait on.
*/
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
ret = bearssl_connect_step2(cf, data);
- if(ret || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(ret || (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
return ret;
}
@@ -1070,20 +1063,52 @@ static void *bearssl_get_internals(struct ssl_connect_data *connssl,
return &backend->ctx;
}
-static void bearssl_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+static CURLcode bearssl_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
struct bearssl_ssl_backend_data *backend =
(struct bearssl_ssl_backend_data *)connssl->backend;
- size_t i;
+ CURLcode result;
DEBUGASSERT(backend);
+ if(!backend->active || cf->shutdown) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
- if(backend->active) {
- backend->active = FALSE;
+ *done = FALSE;
+ if(!backend->sent_shutdown) {
+ (void)send_shutdown; /* unknown how to suppress our close notify */
br_ssl_engine_close(&backend->ctx.eng);
- (void)bearssl_run_until(cf, data, BR_SSL_CLOSED);
+ backend->sent_shutdown = TRUE;
+ }
+
+ result = bearssl_run_until(cf, data, BR_SSL_CLOSED);
+ if(result == CURLE_OK) {
+ *done = TRUE;
}
+ else if(result == CURLE_AGAIN)
+ result = CURLE_OK;
+ else
+ CURL_TRC_CF(data, cf, "shutdown error: %d", result);
+
+ cf->shutdown = (result || *done);
+ return result;
+}
+
+static void bearssl_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct bearssl_ssl_backend_data *backend =
+ (struct bearssl_ssl_backend_data *)connssl->backend;
+ size_t i;
+
+ (void)data;
+ DEBUGASSERT(backend);
+
+ backend->active = FALSE;
if(backend->anchors) {
for(i = 0; i < backend->anchors_len; ++i)
free(backend->anchors[i].dn.data);
@@ -1113,7 +1138,7 @@ const struct Curl_ssl Curl_ssl_bearssl = {
Curl_none_cleanup, /* cleanup */
bearssl_version, /* version */
Curl_none_check_cxn, /* check_cxn */
- Curl_none_shutdown, /* shutdown */
+ bearssl_shutdown, /* shutdown */
bearssl_data_pending, /* data_pending */
bearssl_random, /* random */
Curl_none_cert_status_request, /* cert_status_request */
@@ -1130,7 +1155,6 @@ const struct Curl_ssl Curl_ssl_bearssl = {
bearssl_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
bearssl_recv, /* recv decrypted data */
bearssl_send, /* send data to encrypt */
};
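Two patterns in the BearSSL changes above recur across the TLS backends in this update: handshake polling is now driven by the connssl->io_need bitmask (CURL_SSL_IO_NEED_RECV / CURL_SSL_IO_NEED_SEND) instead of the old ssl_connect_2_reading/_writing sub-states, and the bits are translated into the read/write sockets handed to Curl_socket_check(). A condensed sketch of that translation, assuming the internal CURL_SSL_IO_NEED_* definitions from the hunks above and a hypothetical wait_for_io() helper standing in for the real polling call:

#include <curl/curl.h>

/* hypothetical stand-in for the real poll/select helper */
extern int wait_for_io(curl_socket_t readfd, curl_socket_t writefd,
                       int timeout_ms);

/* Sketch: turn the io_need bits into a read/write socket wait. */
static int wait_on_io_need(unsigned int io_need, curl_socket_t sockfd,
                           int timeout_ms)
{
  curl_socket_t readfd = (io_need & CURL_SSL_IO_NEED_RECV) ?
    sockfd : CURL_SOCKET_BAD;
  curl_socket_t writefd = (io_need & CURL_SSL_IO_NEED_SEND) ?
    sockfd : CURL_SOCKET_BAD;

  return wait_for_io(readfd, writefd, timeout_ms);
}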
diff --git a/libs/libcurl/src/vtls/cipher_suite.c b/libs/libcurl/src/vtls/cipher_suite.c
index 723b94d13f..76a0d424ff 100644
--- a/libs/libcurl/src/vtls/cipher_suite.c
+++ b/libs/libcurl/src/vtls/cipher_suite.c
@@ -23,7 +23,7 @@
***************************************************************************/
#include "curl_setup.h"
-#if defined(USE_MBEDTLS) || defined(USE_BEARSSL)
+#if defined(USE_SECTRANSP) || defined(USE_MBEDTLS) || defined(USE_BEARSSL)
#include "cipher_suite.h"
#include "curl_printf.h"
#include "strcase.h"
@@ -33,7 +33,7 @@
* To support the CURLOPT_SSL_CIPHER_LIST option on SSL backends
* that do not support it natively, but do support setting a list of
* IANA ids, we need a list of all supported cipher suite names
- * (openssl and IANA) to be able to look up the IANA ids.
+ * (OpenSSL and IANA) to be able to look up the IANA ids.
*
* To keep the binary size of this list down we compress each entry
* down to 2 + 6 bytes using the C preprocessor.
@@ -42,7 +42,7 @@
/*
* mbedTLS NOTE: mbedTLS has mbedtls_ssl_get_ciphersuite_id() to
* convert a string representation to an IANA id, we do not use that
- * because it does not support "standard" openssl cipher suite
+ * because it does not support "standard" OpenSSL cipher suite
* names, nor IANA names.
*/
@@ -89,6 +89,21 @@ static const char *cs_txt =
"CAMELLIA128" "\0"
"CAMELLIA256" "\0"
#endif
+#if defined(USE_SECTRANSP)
+ "40" "\0"
+ "ADH" "\0"
+ "AECDH" "\0"
+ "anon" "\0"
+ "DES40" "\0"
+ "DH" "\0"
+ "DSS" "\0"
+ "EDH" "\0"
+ "EXP" "\0"
+ "EXPORT" "\0"
+ "IDEA" "\0"
+ "RC2" "\0"
+ "RC4" "\0"
+#endif
;
/* Indexes of above cs_txt */
enum {
@@ -130,6 +145,21 @@ enum {
CS_TXT_IDX_CAMELLIA128,
CS_TXT_IDX_CAMELLIA256,
#endif
+#if defined(USE_SECTRANSP)
+ CS_TXT_IDX_40,
+ CS_TXT_IDX_ADH,
+ CS_TXT_IDX_AECDH,
+ CS_TXT_IDX_anon,
+ CS_TXT_IDX_DES40,
+ CS_TXT_IDX_DH,
+ CS_TXT_IDX_DSS,
+ CS_TXT_IDX_EDH,
+ CS_TXT_IDX_EXP,
+ CS_TXT_IDX_EXPORT,
+ CS_TXT_IDX_IDEA,
+ CS_TXT_IDX_RC2,
+ CS_TXT_IDX_RC4,
+#endif
CS_TXT_LEN,
};
@@ -224,7 +254,7 @@ static const struct cs_entry cs_list [] = {
CS_ENTRY(0xCCA8, ECDHE,RSA,CHACHA20,POLY1305,,,,),
CS_ENTRY(0xCCA9, TLS,ECDHE,ECDSA,WITH,CHACHA20,POLY1305,SHA256,),
CS_ENTRY(0xCCA9, ECDHE,ECDSA,CHACHA20,POLY1305,,,,),
-#if defined(USE_MBEDTLS)
+#if defined(USE_SECTRANSP) || defined(USE_MBEDTLS)
CS_ENTRY(0x0001, TLS,RSA,WITH,NULL,MD5,,,),
CS_ENTRY(0x0001, NULL,MD5,,,,,,),
CS_ENTRY(0x0002, TLS,RSA,WITH,NULL,SHA,,,),
@@ -317,7 +347,7 @@ static const struct cs_entry cs_list [] = {
CS_ENTRY(0xCCAB, TLS,PSK,WITH,CHACHA20,POLY1305,SHA256,,),
CS_ENTRY(0xCCAB, PSK,CHACHA20,POLY1305,,,,,),
#endif
-#if defined(USE_BEARSSL)
+#if defined(USE_SECTRANSP) || defined(USE_BEARSSL)
CS_ENTRY(0x000A, TLS,RSA,WITH,3DES,EDE,CBC,SHA,),
CS_ENTRY(0x000A, DES,CBC3,SHA,,,,,),
CS_ENTRY(0xC003, TLS,ECDH,ECDSA,WITH,3DES,EDE,CBC,SHA),
@@ -329,6 +359,7 @@ static const struct cs_entry cs_list [] = {
CS_ENTRY(0xC012, TLS,ECDHE,RSA,WITH,3DES,EDE,CBC,SHA),
CS_ENTRY(0xC012, ECDHE,RSA,DES,CBC3,SHA,,,),
#endif
+#if defined(USE_MBEDTLS) || defined(USE_BEARSSL)
CS_ENTRY(0xC09C, TLS,RSA,WITH,AES,128,CCM,,),
CS_ENTRY(0xC09C, AES128,CCM,,,,,,),
CS_ENTRY(0xC09D, TLS,RSA,WITH,AES,256,CCM,,),
@@ -345,8 +376,144 @@ static const struct cs_entry cs_list [] = {
CS_ENTRY(0xC0AE, ECDHE,ECDSA,AES128,CCM8,,,,),
CS_ENTRY(0xC0AF, TLS,ECDHE,ECDSA,WITH,AES,256,CCM,8),
CS_ENTRY(0xC0AF, ECDHE,ECDSA,AES256,CCM8,,,,),
+#endif
+#if defined(USE_SECTRANSP)
+ /* entries marked bc are backward compatible aliases for old OpenSSL names */
+ CS_ENTRY(0x0003, TLS,RSA,EXPORT,WITH,RC4,40,MD5,),
+ CS_ENTRY(0x0003, EXP,RC4,MD5,,,,,),
+ CS_ENTRY(0x0004, TLS,RSA,WITH,RC4,128,MD5,,),
+ CS_ENTRY(0x0004, RC4,MD5,,,,,,),
+ CS_ENTRY(0x0005, TLS,RSA,WITH,RC4,128,SHA,,),
+ CS_ENTRY(0x0005, RC4,SHA,,,,,,),
+ CS_ENTRY(0x0006, TLS,RSA,EXPORT,WITH,RC2,CBC,40,MD5),
+ CS_ENTRY(0x0006, EXP,RC2,CBC,MD5,,,,),
+ CS_ENTRY(0x0007, TLS,RSA,WITH,IDEA,CBC,SHA,,),
+ CS_ENTRY(0x0007, IDEA,CBC,SHA,,,,,),
+ CS_ENTRY(0x0008, TLS,RSA,EXPORT,WITH,DES40,CBC,SHA,),
+ CS_ENTRY(0x0008, EXP,DES,CBC,SHA,,,,),
+ CS_ENTRY(0x0009, TLS,RSA,WITH,DES,CBC,SHA,,),
+ CS_ENTRY(0x0009, DES,CBC,SHA,,,,,),
+ CS_ENTRY(0x000B, TLS,DH,DSS,EXPORT,WITH,DES40,CBC,SHA),
+ CS_ENTRY(0x000B, EXP,DH,DSS,DES,CBC,SHA,,),
+ CS_ENTRY(0x000C, TLS,DH,DSS,WITH,DES,CBC,SHA,),
+ CS_ENTRY(0x000C, DH,DSS,DES,CBC,SHA,,,),
+ CS_ENTRY(0x000D, TLS,DH,DSS,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x000D, DH,DSS,DES,CBC3,SHA,,,),
+ CS_ENTRY(0x000E, TLS,DH,RSA,EXPORT,WITH,DES40,CBC,SHA),
+ CS_ENTRY(0x000E, EXP,DH,RSA,DES,CBC,SHA,,),
+ CS_ENTRY(0x000F, TLS,DH,RSA,WITH,DES,CBC,SHA,),
+ CS_ENTRY(0x000F, DH,RSA,DES,CBC,SHA,,,),
+ CS_ENTRY(0x0010, TLS,DH,RSA,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x0010, DH,RSA,DES,CBC3,SHA,,,),
+ CS_ENTRY(0x0011, TLS,DHE,DSS,EXPORT,WITH,DES40,CBC,SHA),
+ CS_ENTRY(0x0011, EXP,DHE,DSS,DES,CBC,SHA,,),
+ CS_ENTRY(0x0011, EXP,EDH,DSS,DES,CBC,SHA,,), /* bc */
+ CS_ENTRY(0x0012, TLS,DHE,DSS,WITH,DES,CBC,SHA,),
+ CS_ENTRY(0x0012, DHE,DSS,DES,CBC,SHA,,,),
+ CS_ENTRY(0x0012, EDH,DSS,DES,CBC,SHA,,,), /* bc */
+ CS_ENTRY(0x0013, TLS,DHE,DSS,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x0013, DHE,DSS,DES,CBC3,SHA,,,),
+ CS_ENTRY(0x0013, EDH,DSS,DES,CBC3,SHA,,,), /* bc */
+ CS_ENTRY(0x0014, TLS,DHE,RSA,EXPORT,WITH,DES40,CBC,SHA),
+ CS_ENTRY(0x0014, EXP,DHE,RSA,DES,CBC,SHA,,),
+ CS_ENTRY(0x0014, EXP,EDH,RSA,DES,CBC,SHA,,), /* bc */
+ CS_ENTRY(0x0015, TLS,DHE,RSA,WITH,DES,CBC,SHA,),
+ CS_ENTRY(0x0015, DHE,RSA,DES,CBC,SHA,,,),
+ CS_ENTRY(0x0015, EDH,RSA,DES,CBC,SHA,,,), /* bc */
+ CS_ENTRY(0x0016, TLS,DHE,RSA,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x0016, DHE,RSA,DES,CBC3,SHA,,,),
+ CS_ENTRY(0x0016, EDH,RSA,DES,CBC3,SHA,,,), /* bc */
+ CS_ENTRY(0x0017, TLS,DH,anon,EXPORT,WITH,RC4,40,MD5),
+ CS_ENTRY(0x0017, EXP,ADH,RC4,MD5,,,,),
+ CS_ENTRY(0x0018, TLS,DH,anon,WITH,RC4,128,MD5,),
+ CS_ENTRY(0x0018, ADH,RC4,MD5,,,,,),
+ CS_ENTRY(0x0019, TLS,DH,anon,EXPORT,WITH,DES40,CBC,SHA),
+ CS_ENTRY(0x0019, EXP,ADH,DES,CBC,SHA,,,),
+ CS_ENTRY(0x001A, TLS,DH,anon,WITH,DES,CBC,SHA,),
+ CS_ENTRY(0x001A, ADH,DES,CBC,SHA,,,,),
+ CS_ENTRY(0x001B, TLS,DH,anon,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x001B, ADH,DES,CBC3,SHA,,,,),
+ CS_ENTRY(0x0030, TLS,DH,DSS,WITH,AES,128,CBC,SHA),
+ CS_ENTRY(0x0030, DH,DSS,AES128,SHA,,,,),
+ CS_ENTRY(0x0031, TLS,DH,RSA,WITH,AES,128,CBC,SHA),
+ CS_ENTRY(0x0031, DH,RSA,AES128,SHA,,,,),
+ CS_ENTRY(0x0032, TLS,DHE,DSS,WITH,AES,128,CBC,SHA),
+ CS_ENTRY(0x0032, DHE,DSS,AES128,SHA,,,,),
+ CS_ENTRY(0x0034, TLS,DH,anon,WITH,AES,128,CBC,SHA),
+ CS_ENTRY(0x0034, ADH,AES128,SHA,,,,,),
+ CS_ENTRY(0x0036, TLS,DH,DSS,WITH,AES,256,CBC,SHA),
+ CS_ENTRY(0x0036, DH,DSS,AES256,SHA,,,,),
+ CS_ENTRY(0x0037, TLS,DH,RSA,WITH,AES,256,CBC,SHA),
+ CS_ENTRY(0x0037, DH,RSA,AES256,SHA,,,,),
+ CS_ENTRY(0x0038, TLS,DHE,DSS,WITH,AES,256,CBC,SHA),
+ CS_ENTRY(0x0038, DHE,DSS,AES256,SHA,,,,),
+ CS_ENTRY(0x003A, TLS,DH,anon,WITH,AES,256,CBC,SHA),
+ CS_ENTRY(0x003A, ADH,AES256,SHA,,,,,),
+ CS_ENTRY(0x003E, TLS,DH,DSS,WITH,AES,128,CBC,SHA256),
+ CS_ENTRY(0x003E, DH,DSS,AES128,SHA256,,,,),
+ CS_ENTRY(0x003F, TLS,DH,RSA,WITH,AES,128,CBC,SHA256),
+ CS_ENTRY(0x003F, DH,RSA,AES128,SHA256,,,,),
+ CS_ENTRY(0x0040, TLS,DHE,DSS,WITH,AES,128,CBC,SHA256),
+ CS_ENTRY(0x0040, DHE,DSS,AES128,SHA256,,,,),
+ CS_ENTRY(0x0068, TLS,DH,DSS,WITH,AES,256,CBC,SHA256),
+ CS_ENTRY(0x0068, DH,DSS,AES256,SHA256,,,,),
+ CS_ENTRY(0x0069, TLS,DH,RSA,WITH,AES,256,CBC,SHA256),
+ CS_ENTRY(0x0069, DH,RSA,AES256,SHA256,,,,),
+ CS_ENTRY(0x006A, TLS,DHE,DSS,WITH,AES,256,CBC,SHA256),
+ CS_ENTRY(0x006A, DHE,DSS,AES256,SHA256,,,,),
+ CS_ENTRY(0x006C, TLS,DH,anon,WITH,AES,128,CBC,SHA256),
+ CS_ENTRY(0x006C, ADH,AES128,SHA256,,,,,),
+ CS_ENTRY(0x006D, TLS,DH,anon,WITH,AES,256,CBC,SHA256),
+ CS_ENTRY(0x006D, ADH,AES256,SHA256,,,,,),
+ CS_ENTRY(0x008A, TLS,PSK,WITH,RC4,128,SHA,,),
+ CS_ENTRY(0x008A, PSK,RC4,SHA,,,,,),
+ CS_ENTRY(0x008B, TLS,PSK,WITH,3DES,EDE,CBC,SHA,),
+ CS_ENTRY(0x008B, PSK,3DES,EDE,CBC,SHA,,,),
+ CS_ENTRY(0x008E, TLS,DHE,PSK,WITH,RC4,128,SHA,),
+ CS_ENTRY(0x008E, DHE,PSK,RC4,SHA,,,,),
+ CS_ENTRY(0x008F, TLS,DHE,PSK,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x008F, DHE,PSK,3DES,EDE,CBC,SHA,,),
+ CS_ENTRY(0x0092, TLS,RSA,PSK,WITH,RC4,128,SHA,),
+ CS_ENTRY(0x0092, RSA,PSK,RC4,SHA,,,,),
+ CS_ENTRY(0x0093, TLS,RSA,PSK,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0x0093, RSA,PSK,3DES,EDE,CBC,SHA,,),
+ CS_ENTRY(0x00A0, TLS,DH,RSA,WITH,AES,128,GCM,SHA256),
+ CS_ENTRY(0x00A0, DH,RSA,AES128,GCM,SHA256,,,),
+ CS_ENTRY(0x00A1, TLS,DH,RSA,WITH,AES,256,GCM,SHA384),
+ CS_ENTRY(0x00A1, DH,RSA,AES256,GCM,SHA384,,,),
+ CS_ENTRY(0x00A2, TLS,DHE,DSS,WITH,AES,128,GCM,SHA256),
+ CS_ENTRY(0x00A2, DHE,DSS,AES128,GCM,SHA256,,,),
+ CS_ENTRY(0x00A3, TLS,DHE,DSS,WITH,AES,256,GCM,SHA384),
+ CS_ENTRY(0x00A3, DHE,DSS,AES256,GCM,SHA384,,,),
+ CS_ENTRY(0x00A4, TLS,DH,DSS,WITH,AES,128,GCM,SHA256),
+ CS_ENTRY(0x00A4, DH,DSS,AES128,GCM,SHA256,,,),
+ CS_ENTRY(0x00A5, TLS,DH,DSS,WITH,AES,256,GCM,SHA384),
+ CS_ENTRY(0x00A5, DH,DSS,AES256,GCM,SHA384,,,),
+ CS_ENTRY(0x00A6, TLS,DH,anon,WITH,AES,128,GCM,SHA256),
+ CS_ENTRY(0x00A6, ADH,AES128,GCM,SHA256,,,,),
+ CS_ENTRY(0x00A7, TLS,DH,anon,WITH,AES,256,GCM,SHA384),
+ CS_ENTRY(0x00A7, ADH,AES256,GCM,SHA384,,,,),
+ CS_ENTRY(0xC002, TLS,ECDH,ECDSA,WITH,RC4,128,SHA,),
+ CS_ENTRY(0xC002, ECDH,ECDSA,RC4,SHA,,,,),
+ CS_ENTRY(0xC007, TLS,ECDHE,ECDSA,WITH,RC4,128,SHA,),
+ CS_ENTRY(0xC007, ECDHE,ECDSA,RC4,SHA,,,,),
+ CS_ENTRY(0xC00C, TLS,ECDH,RSA,WITH,RC4,128,SHA,),
+ CS_ENTRY(0xC00C, ECDH,RSA,RC4,SHA,,,,),
+ CS_ENTRY(0xC011, TLS,ECDHE,RSA,WITH,RC4,128,SHA,),
+ CS_ENTRY(0xC011, ECDHE,RSA,RC4,SHA,,,,),
+ CS_ENTRY(0xC015, TLS,ECDH,anon,WITH,NULL,SHA,,),
+ CS_ENTRY(0xC015, AECDH,NULL,SHA,,,,,),
+ CS_ENTRY(0xC016, TLS,ECDH,anon,WITH,RC4,128,SHA,),
+ CS_ENTRY(0xC016, AECDH,RC4,SHA,,,,,),
+ CS_ENTRY(0xC017, TLS,ECDH,anon,WITH,3DES,EDE,CBC,SHA),
+ CS_ENTRY(0xC017, AECDH,DES,CBC3,SHA,,,,),
+ CS_ENTRY(0xC018, TLS,ECDH,anon,WITH,AES,128,CBC,SHA),
+ CS_ENTRY(0xC018, AECDH,AES128,SHA,,,,,),
+ CS_ENTRY(0xC019, TLS,ECDH,anon,WITH,AES,256,CBC,SHA),
+ CS_ENTRY(0xC019, AECDH,AES256,SHA,,,,,),
+#endif
#if defined(USE_MBEDTLS)
- /* entries marked ns are "non-standard", they are not in openssl */
+ /* entries marked ns are "non-standard", they are not in OpenSSL */
CS_ENTRY(0x0041, TLS,RSA,WITH,CAMELLIA,128,CBC,SHA,),
CS_ENTRY(0x0041, CAMELLIA128,SHA,,,,,,),
CS_ENTRY(0x0045, TLS,DHE,RSA,WITH,CAMELLIA,128,CBC,SHA),
@@ -713,4 +880,5 @@ int Curl_cipher_suite_get_str(uint16_t id, char *buf, size_t buf_size,
return r;
}
-#endif /* defined(USE_MBEDTLS) || defined(USE_BEARSSL) */
+#endif /* defined(USE_SECTRANSP) || defined(USE_MBEDTLS) || \
+ defined(USE_BEARSSL) */
diff --git a/libs/libcurl/src/vtls/cipher_suite.h b/libs/libcurl/src/vtls/cipher_suite.h
index 712325e9f3..4f66070d43 100644
--- a/libs/libcurl/src/vtls/cipher_suite.h
+++ b/libs/libcurl/src/vtls/cipher_suite.h
@@ -26,7 +26,7 @@
#include "curl_setup.h"
-#if defined(USE_MBEDTLS) || defined(USE_BEARSSL)
+#if defined(USE_SECTRANSP) || defined(USE_MBEDTLS) || defined(USE_BEARSSL)
#include <stdint.h>
/* Lookup IANA id for cipher suite string, returns 0 if not recognized */
@@ -42,5 +42,6 @@ uint16_t Curl_cipher_suite_walk_str(const char **str, const char **end);
int Curl_cipher_suite_get_str(uint16_t id, char *buf, size_t buf_size,
bool prefer_rfc);
-#endif /* defined(USE_MBEDTLS) || defined(USE_BEARSSL) */
+#endif /* defined(USE_SECTRANSP) || defined(USE_MBEDTLS) || \
+ defined(USE_BEARSSL) */
#endif /* HEADER_CURL_CIPHER_SUITE_H */
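The cipher_suite.c/.h changes above extend the shared cipher-suite name table to Secure Transport, so CURLOPT_SSL_CIPHER_LIST strings can be mapped to IANA ids for that backend as well. A rough sketch of how a backend might consume the two declarations in the header; it assumes Curl_cipher_suite_walk_str() returns the IANA id of the next name in the list (0 when unrecognized) and leaves *end pointing past the parsed token, which is how the other backends appear to use it.

#include <stdint.h>
#include <stdio.h>
#include "cipher_suite.h"   /* internal libcurl header shown above */

/* Sketch: walk a colon-separated cipher list, printing each recognized
 * suite as its IANA id plus its RFC-style name. */
static void dump_cipher_list(const char *list)
{
  const char *ptr = list;
  const char *end;
  char name[64];

  while(ptr && *ptr) {
    uint16_t id = Curl_cipher_suite_walk_str(&ptr, &end);
    if(id) {
      Curl_cipher_suite_get_str(id, name, sizeof(name), 1 /* prefer_rfc */);
      printf("0x%04x %s\n", id, name);
    }
    ptr = end;   /* assumption: 'end' points past the token just parsed */
  }
}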
diff --git a/libs/libcurl/src/vtls/gtls.c b/libs/libcurl/src/vtls/gtls.c
index ef7f6059a3..aa838b22a7 100644
--- a/libs/libcurl/src/vtls/gtls.c
+++ b/libs/libcurl/src/vtls/gtls.c
@@ -26,7 +26,7 @@
* Source file for all GnuTLS-specific code for the TLS/SSL layer. No code
* but vtls.c should ever call or use these functions.
*
- * Note: don't use the GnuTLS' *_t variable type names in this source code,
+ * Note: do not use the GnuTLS' *_t variable type names in this source code,
* since they were not present in 1.0.X.
*/
@@ -125,7 +125,7 @@ static ssize_t gtls_pull(void *s, void *buf, size_t blen)
CURLcode result;
DEBUGASSERT(data);
- if(!backend->gtls.trust_setup) {
+ if(!backend->gtls.shared_creds->trust_setup) {
result = Curl_gtls_client_trust_setup(cf, data, &backend->gtls);
if(result) {
gnutls_transport_set_errno(backend->gtls.session, EINVAL);
@@ -251,6 +251,7 @@ static CURLcode handshake(struct Curl_cfilter *cf,
DEBUGASSERT(backend);
session = backend->gtls.session;
+ connssl->connecting_state = ssl_connect_2;
for(;;) {
timediff_t timeout_ms;
@@ -265,14 +266,13 @@ static CURLcode handshake(struct Curl_cfilter *cf,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(connssl->connecting_state == ssl_connect_2_reading
- || connssl->connecting_state == ssl_connect_2_writing) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
int what;
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking?0:
@@ -294,10 +294,11 @@ static CURLcode handshake(struct Curl_cfilter *cf,
/* socket is readable or writable */
}
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
backend->gtls.io_result = CURLE_OK;
rc = gnutls_handshake(session);
- if(!backend->gtls.trust_setup) {
+ if(!backend->gtls.shared_creds->trust_setup) {
       /* After having sent off the ClientHello, we prepare the trust
* store to verify the coming certificate from the server */
CURLcode result = Curl_gtls_client_trust_setup(cf, data, &backend->gtls);
@@ -306,16 +307,16 @@ static CURLcode handshake(struct Curl_cfilter *cf,
}
if((rc == GNUTLS_E_AGAIN) || (rc == GNUTLS_E_INTERRUPTED)) {
- connssl->connecting_state =
+ connssl->io_need =
gnutls_record_get_direction(session)?
- ssl_connect_2_writing:ssl_connect_2_reading;
+ CURL_SSL_IO_NEED_SEND:CURL_SSL_IO_NEED_RECV;
continue;
}
else if((rc < 0) && !gnutls_error_is_fatal(rc)) {
const char *strerr = NULL;
if(rc == GNUTLS_E_WARNING_ALERT_RECEIVED) {
- int alert = gnutls_alert_get(session);
+ gnutls_alert_description_t alert = gnutls_alert_get(session);
strerr = gnutls_alert_get_name(alert);
}
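
The handshake hunks above replace the old ssl_connect_2_reading/ssl_connect_2_writing states with an io_need bitmask: the TLS layer records whether it is blocked on receiving, sending, or both, and the caller turns that into the sockets it polls. A standalone sketch of that mapping; the flag values here are purely illustrative stand-ins for curl's CURL_SSL_IO_NEED_* constants:

#include <stdio.h>

/* illustrative stand-ins for curl's CURL_SSL_IO_NEED_* flags */
#define IO_NEED_NONE 0
#define IO_NEED_RECV (1 << 0)
#define IO_NEED_SEND (1 << 1)

typedef int sock_t;
#define SOCK_BAD (-1)

/* pick which socket, if any, to watch for readability and writability */
static void io_need_to_fds(int io_need, sock_t sockfd,
                           sock_t *readfd, sock_t *writefd)
{
  *readfd  = (io_need & IO_NEED_RECV) ? sockfd : SOCK_BAD;
  *writefd = (io_need & IO_NEED_SEND) ? sockfd : SOCK_BAD;
}

int main(void)
{
  sock_t r, w;
  io_need_to_fds(IO_NEED_RECV | IO_NEED_SEND, 5, &r, &w);
  printf("watch read on %d, write on %d\n", r, w);
  return 0;
}

A bitmask also lets a backend wait for both directions at once, which the old two-state connecting_state could not express.
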
@@ -332,7 +333,7 @@ static CURLcode handshake(struct Curl_cfilter *cf,
const char *strerr = NULL;
if(rc == GNUTLS_E_FATAL_ALERT_RECEIVED) {
- int alert = gnutls_alert_get(session);
+ gnutls_alert_description_t alert = gnutls_alert_get(session);
strerr = gnutls_alert_get_name(alert);
}
@@ -376,9 +377,15 @@ set_ssl_version_min_max(struct Curl_easy *data,
long ssl_version = conn_config->version;
long ssl_version_max = conn_config->version_max;
+ if((ssl_version == CURL_SSLVERSION_DEFAULT) ||
+ (ssl_version == CURL_SSLVERSION_TLSv1))
+ ssl_version = CURL_SSLVERSION_TLSv1_0;
+ if(ssl_version_max == CURL_SSLVERSION_MAX_NONE)
+ ssl_version_max = CURL_SSLVERSION_MAX_DEFAULT;
+
if(peer->transport == TRNSPRT_QUIC) {
- if((ssl_version != CURL_SSLVERSION_DEFAULT) &&
- (ssl_version < CURL_SSLVERSION_TLSv1_3)) {
+ if((ssl_version_max != CURL_SSLVERSION_MAX_DEFAULT) &&
+ (ssl_version_max < CURL_SSLVERSION_MAX_TLSv1_3)) {
failf(data, "QUIC needs at least TLS version 1.3");
return CURLE_SSL_CONNECT_ERROR;
}
@@ -386,13 +393,8 @@ set_ssl_version_min_max(struct Curl_easy *data,
return CURLE_OK;
}
- if((ssl_version == CURL_SSLVERSION_DEFAULT) ||
- (ssl_version == CURL_SSLVERSION_TLSv1))
- ssl_version = CURL_SSLVERSION_TLSv1_0;
- if(ssl_version_max == CURL_SSLVERSION_MAX_NONE)
- ssl_version_max = CURL_SSLVERSION_MAX_DEFAULT;
if(!tls13support) {
- /* If the running GnuTLS doesn't support TLS 1.3, we must not specify a
+ /* If the running GnuTLS does not support TLS 1.3, we must not specify a
       prioritylist involving that since it will make GnuTLS return an
error back at us */
if((ssl_version_max == CURL_SSLVERSION_MAX_TLSv1_3) ||
@@ -450,20 +452,67 @@ set_ssl_version_min_max(struct Curl_easy *data,
return CURLE_SSL_CONNECT_ERROR;
}
-CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct gtls_ctx *gtls)
+CURLcode Curl_gtls_shared_creds_create(struct Curl_easy *data,
+ struct gtls_shared_creds **pcreds)
+{
+ struct gtls_shared_creds *shared;
+ int rc;
+
+ *pcreds = NULL;
+ shared = calloc(1, sizeof(*shared));
+ if(!shared)
+ return CURLE_OUT_OF_MEMORY;
+
+ rc = gnutls_certificate_allocate_credentials(&shared->creds);
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_cert_all_cred() failed: %s", gnutls_strerror(rc));
+ free(shared);
+ return CURLE_SSL_CONNECT_ERROR;
+ }
+
+ shared->refcount = 1;
+ shared->time = Curl_now();
+ *pcreds = shared;
+ return CURLE_OK;
+}
+
+CURLcode Curl_gtls_shared_creds_up_ref(struct gtls_shared_creds *creds)
+{
+ DEBUGASSERT(creds);
+ if(creds->refcount < SIZE_T_MAX) {
+ ++creds->refcount;
+ return CURLE_OK;
+ }
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+}
+
+void Curl_gtls_shared_creds_free(struct gtls_shared_creds **pcreds)
+{
+ struct gtls_shared_creds *shared = *pcreds;
+ *pcreds = NULL;
+ if(shared) {
+ --shared->refcount;
+ if(!shared->refcount) {
+ gnutls_certificate_free_credentials(shared->creds);
+ free(shared->CAfile);
+ free(shared);
+ }
+ }
+}
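
The three functions above give the shared credential a simple manual reference count: the creator holds the first reference, each additional owner (such as the multi handle's cache) takes another with up_ref, and the last free releases the GnuTLS credential. A standalone sketch of that lifecycle with a plain string standing in for the credential handle; all names here are illustrative, not curl's:

#include <stdlib.h>
#include <string.h>

struct shared_thing {
  char *payload;       /* stands in for the credential handle */
  size_t refcount;
};

static struct shared_thing *thing_create(const char *payload)
{
  struct shared_thing *s = calloc(1, sizeof(*s));
  if(!s)
    return NULL;
  s->payload = strdup(payload);
  if(!s->payload) {
    free(s);
    return NULL;
  }
  s->refcount = 1;              /* creator holds the first reference */
  return s;
}

static int thing_up_ref(struct shared_thing *s)
{
  ++s->refcount;                /* curl additionally guards against overflow */
  return 0;
}

static void thing_free(struct shared_thing **ps)
{
  struct shared_thing *s = *ps;
  *ps = NULL;                   /* the caller's pointer is always cleared */
  if(s && --s->refcount == 0) {
    free(s->payload);
    free(s);
  }
}

int main(void)
{
  struct shared_thing *a = thing_create("x509 store");
  struct shared_thing *b = a;
  thing_up_ref(b);              /* second owner, e.g. the multi cache */
  thing_free(&a);               /* drops to 1, payload still alive */
  thing_free(&b);               /* drops to 0, really freed */
  return 0;
}
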
+
+static CURLcode gtls_populate_creds(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ gnutls_certificate_credentials_t creds)
{
struct ssl_primary_config *config = Curl_ssl_cf_get_primary_config(cf);
struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
int rc;
- CURL_TRC_CF(data, cf, "setup trust anchors and CRLs");
if(config->verifypeer) {
bool imported_native_ca = false;
if(ssl_config->native_ca_store) {
- rc = gnutls_certificate_set_x509_system_trust(gtls->cred);
+ rc = gnutls_certificate_set_x509_system_trust(creds);
if(rc < 0)
infof(data, "error reading native ca store (%s), continuing anyway",
gnutls_strerror(rc));
@@ -476,10 +525,10 @@ CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
if(config->CAfile) {
/* set the trusted CA cert bundle file */
- gnutls_certificate_set_verify_flags(gtls->cred,
+ gnutls_certificate_set_verify_flags(creds,
GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT);
- rc = gnutls_certificate_set_x509_trust_file(gtls->cred,
+ rc = gnutls_certificate_set_x509_trust_file(creds,
config->CAfile,
GNUTLS_X509_FMT_PEM);
if(rc < 0) {
@@ -497,8 +546,7 @@ CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
if(config->CApath) {
/* set the trusted CA cert directory */
- rc = gnutls_certificate_set_x509_trust_dir(gtls->cred,
- config->CApath,
+ rc = gnutls_certificate_set_x509_trust_dir(creds, config->CApath,
GNUTLS_X509_FMT_PEM);
if(rc < 0) {
infof(data, "error reading ca cert file %s (%s)%s",
@@ -516,8 +564,7 @@ CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
if(config->CRLfile) {
/* set the CRL list file */
- rc = gnutls_certificate_set_x509_crl_file(gtls->cred,
- config->CRLfile,
+ rc = gnutls_certificate_set_x509_crl_file(creds, config->CRLfile,
GNUTLS_X509_FMT_PEM);
if(rc < 0) {
failf(data, "error reading crl file %s (%s)",
@@ -528,7 +575,141 @@ CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
infof(data, "found %d CRL in %s", rc, config->CRLfile);
}
- gtls->trust_setup = TRUE;
+ return CURLE_OK;
+}
+
+/* key to use at `multi->proto_hash` */
+#define MPROTO_GTLS_X509_KEY "tls:gtls:x509:share"
+
+static bool gtls_shared_creds_expired(const struct Curl_easy *data,
+ const struct gtls_shared_creds *sc)
+{
+ const struct ssl_general_config *cfg = &data->set.general_ssl;
+ struct curltime now = Curl_now();
+ timediff_t elapsed_ms = Curl_timediff(now, sc->time);
+ timediff_t timeout_ms = cfg->ca_cache_timeout * (timediff_t)1000;
+
+ if(timeout_ms < 0)
+ return false;
+
+ return elapsed_ms >= timeout_ms;
+}
+
+static bool gtls_shared_creds_different(struct Curl_cfilter *cf,
+ const struct gtls_shared_creds *sc)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ if(!sc->CAfile || !conn_config->CAfile)
+ return sc->CAfile != conn_config->CAfile;
+
+ return strcmp(sc->CAfile, conn_config->CAfile);
+}
+
+static struct gtls_shared_creds*
+gtls_get_cached_creds(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct gtls_shared_creds *shared_creds;
+
+ if(data->multi) {
+ shared_creds = Curl_hash_pick(&data->multi->proto_hash,
+ (void *)MPROTO_GTLS_X509_KEY,
+ sizeof(MPROTO_GTLS_X509_KEY)-1);
+ if(shared_creds && shared_creds->creds &&
+ !gtls_shared_creds_expired(data, shared_creds) &&
+ !gtls_shared_creds_different(cf, shared_creds)) {
+ return shared_creds;
+ }
+ }
+ return NULL;
+}
+
+static void gtls_shared_creds_hash_free(void *key, size_t key_len, void *p)
+{
+ struct gtls_shared_creds *sc = p;
+ DEBUGASSERT(key_len == (sizeof(MPROTO_GTLS_X509_KEY)-1));
+ DEBUGASSERT(!memcmp(MPROTO_GTLS_X509_KEY, key, key_len));
+ (void)key;
+ (void)key_len;
+ Curl_gtls_shared_creds_free(&sc); /* down reference */
+}
+
+static void gtls_set_cached_creds(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct gtls_shared_creds *sc)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+
+ DEBUGASSERT(sc);
+ DEBUGASSERT(sc->creds);
+ DEBUGASSERT(!sc->CAfile);
+ DEBUGASSERT(sc->refcount == 1);
+ if(!data->multi)
+ return;
+
+ if(conn_config->CAfile) {
+ sc->CAfile = strdup(conn_config->CAfile);
+ if(!sc->CAfile)
+ return;
+ }
+
+ if(Curl_gtls_shared_creds_up_ref(sc))
+ return;
+
+ if(!Curl_hash_add2(&data->multi->proto_hash,
+ (void *)MPROTO_GTLS_X509_KEY,
+ sizeof(MPROTO_GTLS_X509_KEY)-1,
+ sc, gtls_shared_creds_hash_free)) {
+ Curl_gtls_shared_creds_free(&sc); /* down reference again */
+ return;
+ }
+}
+
+CURLcode Curl_gtls_client_trust_setup(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct gtls_ctx *gtls)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
+ struct gtls_shared_creds *cached_creds = NULL;
+ bool cache_criteria_met;
+ CURLcode result;
+ int rc;
+
+ /* Consider the X509 store cacheable if it comes exclusively from a CAfile,
+     or no source is provided and we are falling back to the TLS library's
+     built-in default. */
+ cache_criteria_met = (data->set.general_ssl.ca_cache_timeout != 0) &&
+ conn_config->verifypeer &&
+ !conn_config->CApath &&
+ !conn_config->ca_info_blob &&
+ !ssl_config->primary.CRLfile &&
+ !ssl_config->native_ca_store &&
+ !conn_config->clientcert; /* GnuTLS adds client cert to its credentials! */
+
+ if(cache_criteria_met)
+ cached_creds = gtls_get_cached_creds(cf, data);
+
+ if(cached_creds && !Curl_gtls_shared_creds_up_ref(cached_creds)) {
+ CURL_TRC_CF(data, cf, "using shared trust anchors and CRLs");
+ Curl_gtls_shared_creds_free(&gtls->shared_creds);
+ gtls->shared_creds = cached_creds;
+ rc = gnutls_credentials_set(gtls->session, GNUTLS_CRD_CERTIFICATE,
+ gtls->shared_creds->creds);
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_credentials_set() failed: %s", gnutls_strerror(rc));
+ return CURLE_SSL_CONNECT_ERROR;
+ }
+ }
+ else {
+ CURL_TRC_CF(data, cf, "loading trust anchors and CRLs");
+ result = gtls_populate_creds(cf, data, gtls->shared_creds->creds);
+ if(result)
+ return result;
+ gtls->shared_creds->trust_setup = TRUE;
+ if(cache_criteria_met)
+ gtls_set_cached_creds(cf, data, gtls->shared_creds);
+ }
return CURLE_OK;
}
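
Curl_gtls_client_trust_setup() above only reuses a cached credential when the trust store is reproducible from the configuration alone: caching is enabled, peer verification is on, the anchors come solely from a CAfile, and no CRLs, native store, CA blob or client certificate would end up in the same credential. A compact standalone restatement of that decision with a hypothetical flattened config struct (field names are illustrative):

#include <stdbool.h>
#include <string.h>

/* hypothetical flattened view of the settings consulted above */
struct trust_cfg {
  long ca_cache_timeout;   /* 0 disables caching, <0 means never expire */
  bool verifypeer;
  const char *CAfile;      /* may be NULL */
  const char *CApath;      /* may be NULL */
  const void *ca_info_blob;
  const char *CRLfile;
  bool native_ca_store;
  const char *clientcert;  /* GnuTLS puts client certs in the same creds */
};

static bool store_is_cacheable(const struct trust_cfg *c)
{
  return c->ca_cache_timeout != 0 &&
         c->verifypeer &&
         !c->CApath &&
         !c->ca_info_blob &&
         !c->CRLfile &&
         !c->native_ca_store &&
         !c->clientcert;
}

/* a cached entry is only reused for the same CAfile (both NULL counts) */
static bool same_cafile(const char *cached, const char *wanted)
{
  if(!cached || !wanted)
    return cached == wanted;
  return strcmp(cached, wanted) == 0;
}

int main(void)
{
  struct trust_cfg c = { 86400, true, "/etc/ssl/certs/ca.pem",
                         NULL, NULL, NULL, false, NULL };
  return store_is_cacheable(&c) &&
         same_cafile("/etc/ssl/certs/ca.pem", c.CAfile) ? 0 : 1;
}
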
@@ -546,7 +727,7 @@ static CURLcode gtls_update_session_id(struct Curl_cfilter *cf,
struct ssl_connect_data *connssl = cf->ctx;
CURLcode result = CURLE_OK;
- if(ssl_config->primary.sessionid) {
+ if(ssl_config->primary.cache_session) {
/* we always unconditionally get the session id here, as even if we
already got it from the cache and asked to use it in the connection, it
might've been rejected and then a new one is in use now and we need to
@@ -561,27 +742,16 @@ static CURLcode gtls_update_session_id(struct Curl_cfilter *cf,
return CURLE_OUT_OF_MEMORY;
}
else {
- bool incache;
- void *ssl_sessionid;
-
/* extract session ID to the allocated buffer */
gnutls_session_get_data(session, connect_sessionid, &connect_idsize);
- DEBUGF(infof(data, "get session id (len=%zu) and store in cache",
- connect_idsize));
+ CURL_TRC_CF(data, cf, "get session id (len=%zu) and store in cache",
+ connect_idsize);
Curl_ssl_sessionid_lock(data);
- incache = !(Curl_ssl_getsessionid(cf, data, &connssl->peer,
- &ssl_sessionid, NULL));
- if(incache) {
- /* there was one before in the cache, so instead of risking that the
- previous one was rejected, we just kill that and store the new */
- Curl_ssl_delsessionid(data, ssl_sessionid);
- }
-
/* store this session id, takes ownership */
- result = Curl_ssl_addsessionid(cf, data, &connssl->peer,
- connect_sessionid, connect_idsize,
- gtls_sessionid_free);
+ result = Curl_ssl_set_sessionid(cf, data, &connssl->peer,
+ connect_sessionid, connect_idsize,
+ gtls_sessionid_free);
Curl_ssl_sessionid_unlock(data);
}
}
@@ -599,8 +769,8 @@ static int gtls_handshake_cb(gnutls_session_t session, unsigned int htype,
if(when) { /* after message has been processed */
struct Curl_easy *data = CF_DATA_CURRENT(cf);
if(data) {
- DEBUGF(infof(data, "handshake: %s message type %d",
- incoming? "incoming" : "outgoing", htype));
+ CURL_TRC_CF(data, cf, "handshake: %s message type %d",
+ incoming? "incoming" : "outgoing", htype);
switch(htype) {
case GNUTLS_HANDSHAKE_NEW_SESSION_TICKET: {
gtls_update_session_id(cf, data, session);
@@ -639,12 +809,10 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
else if(config->version == CURL_SSLVERSION_SSLv3)
sni = FALSE; /* SSLv3 has no SNI */
- /* allocate a cred struct */
- rc = gnutls_certificate_allocate_credentials(&gtls->cred);
- if(rc != GNUTLS_E_SUCCESS) {
- failf(data, "gnutls_cert_all_cred() failed: %s", gnutls_strerror(rc));
- return CURLE_SSL_CONNECT_ERROR;
- }
+ /* allocate a shared creds struct */
+ result = Curl_gtls_shared_creds_create(data, &gtls->shared_creds);
+ if(result)
+ return result;
#ifdef USE_GNUTLS_SRP
if(config->username && Curl_auth_allowed_to_host(data)) {
@@ -705,7 +873,7 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
tls13support = gnutls_check_version("3.6.5");
/* Ensure +SRP comes at the *end* of all relevant strings so that it can be
- * removed if a run-time error indicates that SRP is not supported by this
+ * removed if a runtime error indicates that SRP is not supported by this
* GnuTLS version */
if(config->version == CURL_SSLVERSION_SSLv2 ||
@@ -756,7 +924,7 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
}
if(config->clientcert) {
- if(!gtls->trust_setup) {
+ if(!gtls->shared_creds->trust_setup) {
result = Curl_gtls_client_trust_setup(cf, data, gtls);
if(result)
return result;
@@ -768,7 +936,7 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
GNUTLS_PKCS_USE_PBES2_AES_128 | GNUTLS_PKCS_USE_PBES2_AES_192 |
GNUTLS_PKCS_USE_PBES2_AES_256;
rc = gnutls_certificate_set_x509_key_file2(
- gtls->cred,
+ gtls->shared_creds->creds,
config->clientcert,
ssl_config->key ? ssl_config->key : config->clientcert,
do_file_type(ssl_config->cert_type),
@@ -783,7 +951,7 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
}
else {
if(gnutls_certificate_set_x509_key_file(
- gtls->cred,
+ gtls->shared_creds->creds,
config->clientcert,
ssl_config->key ? ssl_config->key : config->clientcert,
do_file_type(ssl_config->cert_type) ) !=
@@ -808,7 +976,7 @@ static CURLcode gtls_client_init(struct Curl_cfilter *cf,
#endif
{
rc = gnutls_credentials_set(gtls->session, GNUTLS_CRD_CERTIFICATE,
- gtls->cred);
+ gtls->shared_creds->creds);
if(rc != GNUTLS_E_SUCCESS) {
failf(data, "gnutls_credentials_set() failed: %s", gnutls_strerror(rc));
return CURLE_SSL_CONNECT_ERROR;
@@ -903,7 +1071,7 @@ CURLcode Curl_gtls_ctx_init(struct gtls_ctx *gctx,
/* This might be a reconnect, so we check for a session ID in the cache
to speed up things */
- if(conn_config->sessionid) {
+ if(conn_config->cache_session) {
void *ssl_sessionid;
size_t ssl_idsize;
@@ -979,7 +1147,7 @@ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data,
/* Result is returned to caller */
CURLcode result = CURLE_SSL_PINNEDPUBKEYNOTMATCH;
- /* if a path wasn't specified, don't pin */
+ /* if a path was not specified, do not pin */
if(!pinnedpubkey)
return CURLE_OK;
@@ -1045,7 +1213,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
CURLcode result = CURLE_OK;
#ifndef CURL_DISABLE_VERBOSE_STRINGS
const char *ptr;
- unsigned int algo;
+ int algo;
unsigned int bits;
gnutls_protocol_t version = gnutls_protocol_get_version(session);
#endif
@@ -1087,13 +1255,13 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
}
#endif
}
- infof(data, " common name: WARNING couldn't obtain");
+ infof(data, " common name: WARNING could not obtain");
}
if(data->set.ssl.certinfo && chainp) {
unsigned int i;
- result = Curl_ssl_init_certinfo(data, cert_list_size);
+ result = Curl_ssl_init_certinfo(data, (int)cert_list_size);
if(result)
return result;
@@ -1101,7 +1269,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
const char *beg = (const char *) chainp[i].data;
const char *end = beg + chainp[i].size;
- result = Curl_extract_certinfo(data, i, beg, end);
+ result = Curl_extract_certinfo(data, (int)i, beg, end);
if(result)
return result;
}
@@ -1258,7 +1426,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
gnutls_x509_crt_init(&x509_issuer);
issuerp = load_file(config->issuercert);
gnutls_x509_crt_import(x509_issuer, &issuerp, GNUTLS_X509_FMT_PEM);
- rc = gnutls_x509_crt_check_issuer(x509_cert, x509_issuer);
+ rc = (int)gnutls_x509_crt_check_issuer(x509_cert, x509_issuer);
gnutls_x509_crt_deinit(x509_issuer);
unload_file(issuerp);
if(rc <= 0) {
@@ -1287,9 +1455,15 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
in RFC2818 (HTTPS), which takes into account wildcards, and the subject
alternative name PKIX extension. Returns non zero on success, and zero on
failure. */
- rc = gnutls_x509_crt_check_hostname(x509_cert, peer->hostname);
+
+ /* This function does not handle trailing dots, so if we have an SNI name
+     use that and fall back to the hostname only if there is no SNI (like for
+ IP addresses) */
+ rc = (int)gnutls_x509_crt_check_hostname(x509_cert,
+ peer->sni ? peer->sni :
+ peer->hostname);
#if GNUTLS_VERSION_NUMBER < 0x030306
- /* Before 3.3.6, gnutls_x509_crt_check_hostname() didn't check IP
+ /* Before 3.3.6, gnutls_x509_crt_check_hostname() did not check IP
addresses. */
if(!rc) {
#ifdef USE_IPV6
@@ -1315,7 +1489,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
size_t certaddrlen = sizeof(certaddr);
int ret = gnutls_x509_crt_get_subject_alt_name(x509_cert, i, certaddr,
&certaddrlen, NULL);
- /* If this happens, it wasn't an IP address. */
+ /* If this happens, it was not an IP address. */
if(ret == GNUTLS_E_SHORT_MEMORY_BUFFER)
continue;
if(ret < 0)
@@ -1333,7 +1507,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
if(!rc) {
if(config->verifyhost) {
failf(data, "SSL: certificate subject name (%s) does not match "
- "target host name '%s'", certname, peer->dispname);
+ "target hostname '%s'", certname, peer->dispname);
gnutls_x509_crt_deinit(x509_cert);
return CURLE_PEER_FAILED_VERIFICATION;
}
@@ -1422,7 +1596,7 @@ Curl_gtls_verifyserver(struct Curl_easy *data,
/* public key algorithm's parameters */
algo = gnutls_x509_crt_get_pk_algorithm(x509_cert, &bits);
infof(data, " certificate public key: %s",
- gnutls_pk_algorithm_get_name(algo));
+ gnutls_pk_algorithm_get_name((gnutls_pk_algorithm_t)algo));
/* version of the X.509 certificate. */
infof(data, " certificate version: #%d",
@@ -1506,8 +1680,8 @@ out:
*/
/* We use connssl->connecting_state to keep track of the connection status;
there are three states: 'ssl_connect_1' (not started yet or complete),
- 'ssl_connect_2_reading' (waiting for data from server), and
- 'ssl_connect_2_writing' (waiting to be able to write).
+ 'ssl_connect_2' (doing handshake with the server), and
+ 'ssl_connect_3' (verifying and getting stats).
*/
static CURLcode
gtls_connect_common(struct Curl_cfilter *cf,
@@ -1516,7 +1690,7 @@ gtls_connect_common(struct Curl_cfilter *cf,
bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
- int rc;
+ CURLcode rc;
CURLcode result = CURLE_OK;
/* Initiate the connection, if not already done */
@@ -1620,118 +1794,104 @@ static ssize_t gtls_send(struct Curl_cfilter *cf,
return rc;
}
-static void gtls_close(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+/*
+ * This function is called to shut down the SSL layer but keep the
+ * socket open (CCC - Clear Command Channel)
+ */
+static CURLcode gtls_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
struct gtls_ssl_backend_data *backend =
(struct gtls_ssl_backend_data *)connssl->backend;
+ char buf[1024];
+ CURLcode result = CURLE_OK;
+ ssize_t nread;
+ size_t i;
- (void) data;
DEBUGASSERT(backend);
+ if(!backend->gtls.session || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
- if(backend->gtls.session) {
- char buf[32];
- /* Maybe the server has already sent a close notify alert.
- Read it to avoid an RST on the TCP connection. */
- (void)gnutls_record_recv(backend->gtls.session, buf, sizeof(buf));
- gnutls_bye(backend->gtls.session, GNUTLS_SHUT_WR);
- gnutls_deinit(backend->gtls.session);
- backend->gtls.session = NULL;
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+
+ if(!backend->gtls.sent_shutdown) {
+ /* do this only once */
+ backend->gtls.sent_shutdown = TRUE;
+ if(send_shutdown) {
+ int ret = gnutls_bye(backend->gtls.session, GNUTLS_SHUT_RDWR);
+ if((ret == GNUTLS_E_AGAIN) || (ret == GNUTLS_E_INTERRUPTED)) {
+ CURL_TRC_CF(data, cf, "SSL shutdown, gnutls_bye EAGAIN");
+ connssl->io_need = gnutls_record_get_direction(backend->gtls.session)?
+ CURL_SSL_IO_NEED_SEND : CURL_SSL_IO_NEED_RECV;
+ result = CURLE_OK;
+ goto out;
+ }
+ if(ret != GNUTLS_E_SUCCESS) {
+ CURL_TRC_CF(data, cf, "SSL shutdown, gnutls_bye error: '%s'(%d)",
+ gnutls_strerror((int)ret), (int)ret);
+ result = CURLE_RECV_ERROR;
+ goto out;
+ }
+ }
}
- if(backend->gtls.cred) {
- gnutls_certificate_free_credentials(backend->gtls.cred);
- backend->gtls.cred = NULL;
+
+ /* SSL should now have started the shutdown from our side. Since it
+ * was not complete, we are lacking the close notify from the server. */
+ for(i = 0; i < 10; ++i) {
+ nread = gnutls_record_recv(backend->gtls.session, buf, sizeof(buf));
+ if(nread <= 0)
+ break;
}
-#ifdef USE_GNUTLS_SRP
- if(backend->gtls.srp_client_cred) {
- gnutls_srp_free_client_credentials(backend->gtls.srp_client_cred);
- backend->gtls.srp_client_cred = NULL;
+ if(nread > 0) {
+ /* still data coming in? */
}
-#endif
+ else if(nread == 0) {
+ /* We got the close notify alert and are done. */
+ *done = TRUE;
+ }
+ else if((nread == GNUTLS_E_AGAIN) || (nread == GNUTLS_E_INTERRUPTED)) {
+ connssl->io_need = gnutls_record_get_direction(backend->gtls.session)?
+ CURL_SSL_IO_NEED_SEND : CURL_SSL_IO_NEED_RECV;
+ }
+ else {
+ CURL_TRC_CF(data, cf, "SSL shutdown, error: '%s'(%d)",
+ gnutls_strerror((int)nread), (int)nread);
+ result = CURLE_RECV_ERROR;
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
}
-/*
- * This function is called to shut down the SSL layer but keep the
- * socket open (CCC - Clear Command Channel)
- */
-static int gtls_shutdown(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static void gtls_close(struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
struct ssl_connect_data *connssl = cf->ctx;
struct gtls_ssl_backend_data *backend =
(struct gtls_ssl_backend_data *)connssl->backend;
- int retval = 0;
+ (void) data;
DEBUGASSERT(backend);
-
-#ifndef CURL_DISABLE_FTP
- /* This has only been tested on the proftpd server, and the mod_tls code
- sends a close notify alert without waiting for a close notify alert in
- response. Thus we wait for a close notify alert from the server, but
- we do not send one. Let's hope other servers do the same... */
-
- if(data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE)
- gnutls_bye(backend->gtls.session, GNUTLS_SHUT_WR);
-#endif
-
+ CURL_TRC_CF(data, cf, "close");
if(backend->gtls.session) {
- ssize_t result;
- bool done = FALSE;
- char buf[120];
-
- while(!done && !connssl->peer_closed) {
- int what = SOCKET_READABLE(Curl_conn_cf_get_socket(cf, data),
- SSL_SHUTDOWN_TIMEOUT);
- if(what > 0) {
- /* Something to read, let's do it and hope that it is the close
- notify alert from the server */
- result = gnutls_record_recv(backend->gtls.session,
- buf, sizeof(buf));
- switch(result) {
- case 0:
- /* This is the expected response. There was no data but only
- the close notify alert */
- done = TRUE;
- break;
- case GNUTLS_E_AGAIN:
- case GNUTLS_E_INTERRUPTED:
- infof(data, "GNUTLS_E_AGAIN || GNUTLS_E_INTERRUPTED");
- break;
- default:
- retval = -1;
- done = TRUE;
- break;
- }
- }
- else if(0 == what) {
- /* timeout */
- failf(data, "SSL shutdown timeout");
- done = TRUE;
- }
- else {
- /* anything that gets here is fatally bad */
- failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
- retval = -1;
- done = TRUE;
- }
- }
gnutls_deinit(backend->gtls.session);
+ backend->gtls.session = NULL;
+ }
+ if(backend->gtls.shared_creds) {
+ Curl_gtls_shared_creds_free(&backend->gtls.shared_creds);
}
- gnutls_certificate_free_credentials(backend->gtls.cred);
-
#ifdef USE_GNUTLS_SRP
- {
- struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
- if(ssl_config->primary.username)
- gnutls_srp_free_client_credentials(backend->gtls.srp_client_cred);
+ if(backend->gtls.srp_client_cred) {
+ gnutls_srp_free_client_credentials(backend->gtls.srp_client_cred);
+ backend->gtls.srp_client_cred = NULL;
}
#endif
-
- backend->gtls.cred = NULL;
- backend->gtls.session = NULL;
-
- return retval;
}
static ssize_t gtls_recv(struct Curl_cfilter *cf,
@@ -1831,7 +1991,8 @@ const struct Curl_ssl Curl_ssl_gnutls = {
SSLSUPP_CA_PATH |
SSLSUPP_CERTINFO |
SSLSUPP_PINNEDPUBKEY |
- SSLSUPP_HTTPS_PROXY,
+ SSLSUPP_HTTPS_PROXY |
+ SSLSUPP_CA_CACHE,
sizeof(struct gtls_ssl_backend_data),
@@ -1856,7 +2017,6 @@ const struct Curl_ssl Curl_ssl_gnutls = {
gtls_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
gtls_recv, /* recv decrypted data */
gtls_send, /* send data to encrypt */
};
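
gtls_shutdown() above, and its mbedTLS and OpenSSL counterparts later in this patch, follow the same non-blocking contract: send the close notify at most once, drain at most a handful of records while waiting for the peer's close notify, and report either *done, an error, or an io_need direction for the caller to poll before calling again. A self-contained toy model of that contract; the connection state, flag values and helper names are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define IO_NEED_NONE 0
#define IO_NEED_RECV 1
#define IO_NEED_SEND 2

struct toy_tls {
  bool sent_close_notify;
  int records_left;     /* pretend records still buffered before the
                           peer's close notify arrives */
  int io_need;
  bool shut_down;
};

/* one non-blocking shutdown step; the caller repeats it (after waiting
   for io_need) until *done is set or an error is returned */
static int toy_shutdown_step(struct toy_tls *c, bool send_shutdown,
                             bool *done)
{
  int i;
  *done = false;
  c->io_need = IO_NEED_NONE;

  if(c->shut_down) {
    *done = true;
    return 0;
  }
  if(send_shutdown && !c->sent_close_notify)
    c->sent_close_notify = true;   /* gnutls_bye()/SSL_shutdown() goes here */

  /* drain a bounded number of records so one call can never spin forever */
  for(i = 0; i < 10 && c->records_left > 0; i++)
    c->records_left--;

  if(c->records_left == 0) {       /* the peer's close notify arrived */
    c->shut_down = true;
    *done = true;
    return 0;
  }
  c->io_need = IO_NEED_RECV;       /* more to read, ask the caller to wait */
  return 0;                        /* a real backend returns errors here too */
}

int main(void)
{
  struct toy_tls c = { false, 25, IO_NEED_NONE, false };
  bool done = false;
  while(!done)                     /* a real caller polls between steps */
    toy_shutdown_step(&c, true, &done);
  puts("shutdown complete");
  return 0;
}
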
diff --git a/libs/libcurl/src/vtls/gtls.h b/libs/libcurl/src/vtls/gtls.h
index 534b320287..2ba237a315 100644
--- a/libs/libcurl/src/vtls/gtls.h
+++ b/libs/libcurl/src/vtls/gtls.h
@@ -30,6 +30,7 @@
#ifdef USE_GNUTLS
#include <gnutls/gnutls.h>
+#include "timeval.h"
#ifdef HAVE_GNUTLS_SRP
/* the function exists */
@@ -45,14 +46,27 @@ struct ssl_primary_config;
struct ssl_config_data;
struct ssl_peer;
+struct gtls_shared_creds {
+ gnutls_certificate_credentials_t creds;
+ char *CAfile; /* CAfile path used to generate X509 store */
+ struct curltime time; /* when the shared creds was created */
+ size_t refcount;
+ BIT(trust_setup); /* x509 anchors + CRLs have been set up */
+};
+
+CURLcode Curl_gtls_shared_creds_create(struct Curl_easy *data,
+ struct gtls_shared_creds **pcreds);
+CURLcode Curl_gtls_shared_creds_up_ref(struct gtls_shared_creds *creds);
+void Curl_gtls_shared_creds_free(struct gtls_shared_creds **pcreds);
+
struct gtls_ctx {
gnutls_session_t session;
- gnutls_certificate_credentials_t cred;
+ struct gtls_shared_creds *shared_creds;
#ifdef USE_GNUTLS_SRP
gnutls_srp_client_credentials_t srp_client_cred;
#endif
CURLcode io_result; /* result of last IO cfilter operation */
- BIT(trust_setup); /* x509 anchors + CRLs have been set up */
+ BIT(sent_shutdown);
};
typedef CURLcode Curl_gtls_ctx_setup_cb(struct Curl_cfilter *cf,
diff --git a/libs/libcurl/src/vtls/hostcheck.c b/libs/libcurl/src/vtls/hostcheck.c
index 3078ab8061..79ed869e87 100644
--- a/libs/libcurl/src/vtls/hostcheck.c
+++ b/libs/libcurl/src/vtls/hostcheck.c
@@ -62,7 +62,7 @@ static bool pmatch(const char *hostname, size_t hostlen,
* We use the matching rule described in RFC6125, section 6.4.3.
* https://datatracker.ietf.org/doc/html/rfc6125#section-6.4.3
*
- * In addition: ignore trailing dots in the host names and wildcards, so that
+ * In addition: ignore trailing dots in the hostnames and wildcards, so that
* the names are used normalized. This is what the browsers do.
*
* Do not allow wildcard matching on IP numbers. There are apparently
diff --git a/libs/libcurl/src/vtls/hostcheck.h b/libs/libcurl/src/vtls/hostcheck.h
index ce3e81fd42..bc4d40fbe8 100644
--- a/libs/libcurl/src/vtls/hostcheck.h
+++ b/libs/libcurl/src/vtls/hostcheck.h
@@ -26,7 +26,7 @@
#include <curl/curl.h>
-/* returns TRUE if there's a match */
+/* returns TRUE if there is a match */
bool Curl_cert_hostcheck(const char *match_pattern, size_t matchlen,
const char *hostname, size_t hostlen);
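
Curl_cert_hostcheck() declared above implements the RFC 6125 section 6.4.3 matching that hostcheck.c describes: a single left-most wildcard label, trailing dots ignored, no wildcard matching on IP addresses. A deliberately simplified standalone illustration of those rules, which is not curl's implementation and skips several of its safeguards:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strncasecmp (POSIX) */

/* simplified: one '*' may replace the left-most label only, the wildcard
   must still leave at least two labels to the right, and one trailing dot
   on the hostname is ignored */
static bool toy_hostcheck(const char *pattern, const char *hostname)
{
  size_t hlen = strlen(hostname);
  if(hlen && hostname[hlen - 1] == '.')
    hlen--;                              /* ignore one trailing dot */

  if(strncmp(pattern, "*.", 2) != 0)     /* no wildcard: exact match */
    return strlen(pattern) == hlen && !strncasecmp(pattern, hostname, hlen);

  {
    const char *dot = memchr(hostname, '.', hlen);
    const char *rest = pattern + 1;      /* ".example.com" */
    size_t restlen = strlen(rest);
    if(!dot)
      return false;
    if(!memchr(dot + 1, '.', hlen - (size_t)(dot + 1 - hostname)))
      return false;                      /* "*.com" must not match "x.com" */
    return (size_t)(hostname + hlen - dot) == restlen &&
           !strncasecmp(dot, rest, restlen);
  }
}

int main(void)
{
  printf("%d\n", toy_hostcheck("*.example.com", "www.example.com.")); /* 1 */
  printf("%d\n", toy_hostcheck("*.com", "example.com"));              /* 0 */
  return 0;
}
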
diff --git a/libs/libcurl/src/vtls/mbedtls.c b/libs/libcurl/src/vtls/mbedtls.c
index d4820c4c50..a6286f82d6 100644
--- a/libs/libcurl/src/vtls/mbedtls.c
+++ b/libs/libcurl/src/vtls/mbedtls.c
@@ -75,6 +75,7 @@
#include "mbedtls.h"
#include "vtls.h"
#include "vtls_int.h"
+#include "x509asn1.h"
#include "parsedate.h"
#include "connect.h" /* for the connect timeout */
#include "select.h"
@@ -110,6 +111,8 @@ struct mbed_ssl_backend_data {
const char *protocols[3];
#endif
int *ciphersuites;
+ BIT(initialized); /* mbedtls_ssl_context is initialized */
+ BIT(sent_shutdown);
};
/* apply threading? */
@@ -482,6 +485,20 @@ mbed_set_selected_ciphers(struct Curl_easy *data,
return CURLE_OK;
}
+#ifdef TLS13_SUPPORT
+static int mbed_no_verify(void *udata, mbedtls_x509_crt *crt,
+ int depth, uint32_t *flags)
+{
+ (void)udata;
+ (void)crt;
+ (void)depth;
+  /* we clear any faults that mbedTLS's own verification found.
+ * See <https://github.com/Mbed-TLS/mbedtls/issues/9210> */
+ *flags = 0;
+ return 0;
+}
+#endif
+
static CURLcode
mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
{
@@ -504,6 +521,7 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
char errorbuf[128];
DEBUGASSERT(backend);
+ DEBUGASSERT(!backend->initialized);
if((conn_config->version == CURL_SSLVERSION_SSLv2) ||
(conn_config->version == CURL_SSLVERSION_SSLv3)) {
@@ -636,7 +654,7 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
if(ret) {
mbedtls_strerror(ret, errorbuf, sizeof(errorbuf));
- failf(data, "Error reading private key %s - mbedTLS: (-0x%04X) %s",
+ failf(data, "Error reading client cert data %s - mbedTLS: (-0x%04X) %s",
ssl_config->key, -ret, errorbuf);
return CURLE_SSL_CERTPROBLEM;
}
@@ -737,8 +755,19 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
failf(data, "mbedTLS: ssl_config failed");
return CURLE_SSL_CONNECT_ERROR;
}
+#ifdef TLS13_SUPPORT
+ if(!verifypeer) {
+ /* Default verify behaviour changed in mbedtls v3.6.0 with TLS v1.3.
+ * On 1.3 connections, the handshake fails by default without trust
+ * anchors. We override this questionable change by installing our
+ * own verify callback that clears all errors. */
+ mbedtls_ssl_conf_verify(&backend->config, mbed_no_verify, cf);
+ }
+#endif
+
mbedtls_ssl_init(&backend->ssl);
+ backend->initialized = TRUE;
/* new profile with RSA min key len = 1024 ... */
mbedtls_ssl_conf_cert_profile(&backend->config,
@@ -785,10 +814,11 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
NULL /* rev_timeout() */);
if(conn_config->cipher_list) {
- ret = mbed_set_selected_ciphers(data, backend, conn_config->cipher_list);
- if(ret) {
+ CURLcode result = mbed_set_selected_ciphers(data, backend,
+ conn_config->cipher_list);
+ if(result != CURLE_OK) {
failf(data, "mbedTLS: failed to set cipher suites");
- return ret;
+ return result;
}
}
else {
@@ -807,8 +837,8 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
MBEDTLS_SSL_SESSION_TICKETS_DISABLED);
#endif
- /* Check if there's a cached ID we can/should use here! */
- if(ssl_config->primary.sessionid) {
+ /* Check if there is a cached ID we can/should use here! */
+ if(ssl_config->primary.cache_session) {
void *old_session = NULL;
Curl_ssl_sessionid_lock(data);
@@ -854,7 +884,7 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
for(i = 0; i < connssl->alpn->count; ++i) {
backend->protocols[i] = connssl->alpn->entries[i];
}
- /* this function doesn't clone the protocols array, which is why we need
+ /* this function does not clone the protocols array, which is why we need
to keep it around */
if(mbedtls_ssl_conf_alpn_protocols(&backend->config,
&backend->protocols[0])) {
@@ -880,11 +910,11 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
/* give application a chance to interfere with mbedTLS set up. */
if(data->set.ssl.fsslctx) {
- ret = (*data->set.ssl.fsslctx)(data, &backend->config,
- data->set.ssl.fsslctxp);
- if(ret) {
+ CURLcode result = (*data->set.ssl.fsslctx)(data, &backend->config,
+ data->set.ssl.fsslctxp);
+ if(result != CURLE_OK) {
failf(data, "error signaled by ssl ctx callback");
- return ret;
+ return result;
}
}
@@ -893,6 +923,60 @@ mbed_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_OK;
}
+static int count_server_cert(const mbedtls_x509_crt *peercert)
+{
+ int count = 1;
+
+ DEBUGASSERT(peercert);
+
+ while(peercert->next) {
+ ++count;
+ peercert = peercert->next;
+ }
+ return count;
+}
+
+static CURLcode collect_server_cert_single(struct Curl_easy *data,
+ const mbedtls_x509_crt *server_cert,
+ int idx)
+{
+ const char *beg, *end;
+
+ DEBUGASSERT(server_cert);
+
+ beg = (const char *)server_cert->raw.p;
+ end = beg + server_cert->raw.len;
+ return Curl_extract_certinfo(data, idx, beg, end);
+}
+
+static CURLcode collect_server_cert(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const struct mbedtls_x509_crt *peercert)
+{
+#ifndef CURL_DISABLE_VERBOSE_STRINGS
+ const bool show_verbose_server_cert = data->set.verbose;
+#else
+ const bool show_verbose_server_cert = false;
+#endif
+ struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
+ CURLcode result = CURLE_PEER_FAILED_VERIFICATION;
+ int i, count;
+
+ if(!show_verbose_server_cert && !ssl_config->certinfo)
+ return CURLE_OK;
+
+ if(!peercert)
+ return result;
+
+ count = count_server_cert(peercert);
+ result = Curl_ssl_init_certinfo(data, count);
+ for(i = 0 ; !result && peercert ; i++) {
+ result = collect_server_cert_single(data, peercert, i);
+ peercert = peercert->next;
+ }
+ return result;
+}
+
static CURLcode
mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
{
@@ -902,8 +986,6 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
(struct mbed_ssl_backend_data *)connssl->backend;
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
const mbedtls_x509_crt *peercert;
- char cipher_str[64];
- uint16_t cipher_id;
#ifndef CURL_DISABLE_PROXY
const char * const pinnedpubkey = Curl_ssl_cf_is_proxy(cf)?
data->set.str[STRING_SSL_PINNEDPUBLICKEY_PROXY]:
@@ -917,26 +999,41 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
ret = mbedtls_ssl_handshake(&backend->ssl);
if(ret == MBEDTLS_ERR_SSL_WANT_READ) {
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
}
else if(ret == MBEDTLS_ERR_SSL_WANT_WRITE) {
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
return CURLE_OK;
}
+ else if(ret == MBEDTLS_ERR_X509_CERT_VERIFY_FAILED) {
+ failf(data, "peer certificate could not be verified");
+ return CURLE_PEER_FAILED_VERIFICATION;
+ }
else if(ret) {
char errorbuf[128];
+#if MBEDTLS_VERSION_NUMBER >= 0x03020000
+ CURL_TRC_CF(data, cf, "TLS version %04X",
+ mbedtls_ssl_get_version_number(&backend->ssl));
+#endif
mbedtls_strerror(ret, errorbuf, sizeof(errorbuf));
- failf(data, "ssl_handshake returned - mbedTLS: (-0x%04X) %s",
+ failf(data, "ssl_handshake returned: (-0x%04X) %s",
-ret, errorbuf);
return CURLE_SSL_CONNECT_ERROR;
}
- cipher_id = (uint16_t)
- mbedtls_ssl_get_ciphersuite_id_from_ssl(&backend->ssl);
- mbed_cipher_suite_get_str(cipher_id, cipher_str, sizeof(cipher_str), true);
- infof(data, "mbedTLS: Handshake complete, cipher is %s", cipher_str);
-
+#if MBEDTLS_VERSION_NUMBER >= 0x03020000
+ {
+ char cipher_str[64];
+ uint16_t cipher_id;
+ cipher_id = (uint16_t)
+ mbedtls_ssl_get_ciphersuite_id_from_ssl(&backend->ssl);
+ mbed_cipher_suite_get_str(cipher_id, cipher_str, sizeof(cipher_str), true);
+ infof(data, "mbedTLS: Handshake complete, cipher is %s", cipher_str);
+ }
+#else
+ infof(data, "mbedTLS: Handshake complete");
+#endif
ret = mbedtls_ssl_get_verify_result(&backend->ssl);
if(!conn_config->verifyhost)
@@ -964,6 +1061,12 @@ mbed_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
peercert = mbedtls_ssl_get_peer_cert(&backend->ssl);
+ if(peercert) {
+ const CURLcode result = collect_server_cert(cf, data, peercert);
+ if(result)
+ return result;
+ }
+
if(peercert && data->set.verbose) {
#ifndef MBEDTLS_X509_REMOVE_INFO
const size_t bufsize = 16384;
@@ -1088,10 +1191,9 @@ mbed_connect_step3(struct Curl_cfilter *cf, struct Curl_easy *data)
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
DEBUGASSERT(backend);
- if(ssl_config->primary.sessionid) {
+ if(ssl_config->primary.cache_session) {
int ret;
mbedtls_ssl_session *our_ssl_sessionid;
- void *old_ssl_sessionid = NULL;
our_ssl_sessionid = malloc(sizeof(mbedtls_ssl_session));
if(!our_ssl_sessionid)
@@ -1108,15 +1210,11 @@ mbed_connect_step3(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_SSL_CONNECT_ERROR;
}
- /* If there's already a matching session in the cache, delete it */
+ /* If there is already a matching session in the cache, delete it */
Curl_ssl_sessionid_lock(data);
- if(!Curl_ssl_getsessionid(cf, data, &connssl->peer,
- &old_ssl_sessionid, NULL))
- Curl_ssl_delsessionid(data, old_ssl_sessionid);
-
- retcode = Curl_ssl_addsessionid(cf, data, &connssl->peer,
- our_ssl_sessionid, 0,
- mbedtls_session_free);
+ retcode = Curl_ssl_set_sessionid(cf, data, &connssl->peer,
+ our_ssl_sessionid, 0,
+ mbedtls_session_free);
Curl_ssl_sessionid_unlock(data);
if(retcode)
return retcode;
@@ -1141,8 +1239,13 @@ static ssize_t mbed_send(struct Curl_cfilter *cf, struct Curl_easy *data,
ret = mbedtls_ssl_write(&backend->ssl, (unsigned char *)mem, len);
if(ret < 0) {
- *curlcode = (ret == MBEDTLS_ERR_SSL_WANT_WRITE) ?
- CURLE_AGAIN : CURLE_SEND_ERROR;
+ CURL_TRC_CF(data, cf, "mbedtls_ssl_write(len=%zu) -> -0x%04X",
+ len, -ret);
+ *curlcode = ((ret == MBEDTLS_ERR_SSL_WANT_WRITE)
+#ifdef TLS13_SUPPORT
+ || (ret == MBEDTLS_ERR_SSL_RECEIVED_NEW_SESSION_TICKET)
+#endif
+ )? CURLE_AGAIN : CURLE_SEND_ERROR;
ret = -1;
}
@@ -1154,33 +1257,120 @@ static void mbedtls_close_all(struct Curl_easy *data)
(void)data;
}
-static void mbedtls_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+static CURLcode mbedtls_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
struct mbed_ssl_backend_data *backend =
(struct mbed_ssl_backend_data *)connssl->backend;
- char buf[32];
+ unsigned char buf[1024];
+ CURLcode result = CURLE_OK;
+ int ret;
+ size_t i;
- (void)data;
DEBUGASSERT(backend);
- /* Maybe the server has already sent a close notify alert.
- Read it to avoid an RST on the TCP connection. */
- (void)mbedtls_ssl_read(&backend->ssl, (unsigned char *)buf, sizeof(buf));
+ if(!backend->initialized || cf->shutdown) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+
+ if(!backend->sent_shutdown) {
+ /* do this only once */
+ backend->sent_shutdown = TRUE;
+ if(send_shutdown) {
+ ret = mbedtls_ssl_close_notify(&backend->ssl);
+ switch(ret) {
+ case 0: /* we sent it, receive from the server */
+ break;
+ case MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY: /* server also closed */
+ *done = TRUE;
+ goto out;
+ case MBEDTLS_ERR_SSL_WANT_READ:
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ goto out;
+ case MBEDTLS_ERR_SSL_WANT_WRITE:
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ goto out;
+ default:
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown error -0x%04X", -ret);
+ result = CURLE_RECV_ERROR;
+ goto out;
+ }
+ }
+ }
+
+ /* SSL should now have started the shutdown from our side. Since it
+ * was not complete, we are lacking the close notify from the server. */
+ for(i = 0; i < 10; ++i) {
+ ret = mbedtls_ssl_read(&backend->ssl, buf, sizeof(buf));
+ /* This seems to be a bug in mbedTLS TLSv1.3 where it reports
+ * WANT_READ, but has not encountered an EAGAIN. */
+ if(ret == MBEDTLS_ERR_SSL_WANT_READ)
+ ret = mbedtls_ssl_read(&backend->ssl, buf, sizeof(buf));
+#ifdef TLS13_SUPPORT
+ if(ret == MBEDTLS_ERR_SSL_RECEIVED_NEW_SESSION_TICKET)
+ continue;
+#endif
+ if(ret <= 0)
+ break;
+ }
- mbedtls_pk_free(&backend->pk);
- mbedtls_x509_crt_free(&backend->clicert);
- mbedtls_x509_crt_free(&backend->cacert);
+ if(ret > 0) {
+ /* still data coming in? */
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown, still getting data");
+ }
+ else if(ret == 0 || (ret == MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY)) {
+ /* We got the close notify alert and are done. */
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown done");
+ *done = TRUE;
+ }
+ else if(ret == MBEDTLS_ERR_SSL_WANT_READ) {
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown, need RECV");
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ }
+ else if(ret == MBEDTLS_ERR_SSL_WANT_WRITE) {
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown, need SEND");
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ }
+ else {
+ CURL_TRC_CF(data, cf, "mbedtls_shutdown error -0x%04X", -ret);
+ result = CURLE_RECV_ERROR;
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
+}
+
+static void mbedtls_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct mbed_ssl_backend_data *backend =
+ (struct mbed_ssl_backend_data *)connssl->backend;
+
+ (void)data;
+ DEBUGASSERT(backend);
+ if(backend->initialized) {
+ mbedtls_pk_free(&backend->pk);
+ mbedtls_x509_crt_free(&backend->clicert);
+ mbedtls_x509_crt_free(&backend->cacert);
#ifdef MBEDTLS_X509_CRL_PARSE_C
- mbedtls_x509_crl_free(&backend->crl);
+ mbedtls_x509_crl_free(&backend->crl);
#endif
- Curl_safefree(backend->ciphersuites);
- mbedtls_ssl_config_free(&backend->config);
- mbedtls_ssl_free(&backend->ssl);
- mbedtls_ctr_drbg_free(&backend->ctr_drbg);
+ Curl_safefree(backend->ciphersuites);
+ mbedtls_ssl_config_free(&backend->config);
+ mbedtls_ssl_free(&backend->ssl);
+ mbedtls_ctr_drbg_free(&backend->ctr_drbg);
#ifndef THREADING_SUPPORT
- mbedtls_entropy_free(&backend->entropy);
+ mbedtls_entropy_free(&backend->entropy);
#endif /* THREADING_SUPPORT */
+ backend->initialized = FALSE;
+ }
}
static ssize_t mbed_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
@@ -1198,16 +1388,21 @@ static ssize_t mbed_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
ret = mbedtls_ssl_read(&backend->ssl, (unsigned char *)buf,
buffersize);
-
if(ret <= 0) {
+ CURL_TRC_CF(data, cf, "mbedtls_ssl_read(len=%zu) -> -0x%04X",
+ buffersize, -ret);
if(ret == MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY)
return 0;
-
*curlcode = ((ret == MBEDTLS_ERR_SSL_WANT_READ)
#ifdef TLS13_SUPPORT
|| (ret == MBEDTLS_ERR_SSL_RECEIVED_NEW_SESSION_TICKET)
#endif
) ? CURLE_AGAIN : CURLE_RECV_ERROR;
+ if(*curlcode != CURLE_AGAIN) {
+ char errorbuf[128];
+ mbedtls_strerror(ret, errorbuf, sizeof(errorbuf));
+ failf(data, "ssl_read returned: (-0x%04X) %s", -ret, errorbuf);
+ }
return -1;
}
@@ -1290,7 +1485,7 @@ mbed_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
}
if(ssl_connect_1 == connssl->connecting_state) {
- /* Find out how much more time we're allowed */
+ /* Find out how much more time we are allowed */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -1303,9 +1498,7 @@ mbed_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
return retcode;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
/* check allowed time left */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -1316,14 +1509,13 @@ mbed_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(connssl->connecting_state == ssl_connect_2_reading
- || connssl->connecting_state == ssl_connect_2_writing) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking ? 0 : timeout_ms);
@@ -1353,11 +1545,10 @@ mbed_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
* ensuring that a client using select() or epoll() will always
* have a valid fdset to wait on.
*/
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
retcode = mbed_connect_step2(cf, data);
- if(retcode || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(retcode ||
+ (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
return retcode;
} /* repeat step2 until all transactions are done. */
@@ -1474,6 +1665,7 @@ const struct Curl_ssl Curl_ssl_mbedtls = {
SSLSUPP_CA_PATH |
SSLSUPP_CAINFO_BLOB |
+ SSLSUPP_CERTINFO |
SSLSUPP_PINNEDPUBKEY |
SSLSUPP_SSL_CTX |
SSLSUPP_HTTPS_PROXY,
@@ -1484,7 +1676,7 @@ const struct Curl_ssl Curl_ssl_mbedtls = {
mbedtls_cleanup, /* cleanup */
mbedtls_version, /* version */
Curl_none_check_cxn, /* check_cxn */
- Curl_none_shutdown, /* shutdown */
+ mbedtls_shutdown, /* shutdown */
mbedtls_data_pending, /* data_pending */
mbedtls_random, /* random */
Curl_none_cert_status_request, /* cert_status_request */
@@ -1501,7 +1693,6 @@ const struct Curl_ssl Curl_ssl_mbedtls = {
mbedtls_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
mbed_recv, /* recv decrypted data */
mbed_send, /* send data to encrypt */
};
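
The mbed_no_verify() callback added above works around the mbedTLS >= 3.6.0 behaviour where a TLS 1.3 handshake fails by default when no trust anchors are configured, by clearing every verification flag whenever the user has switched peer verification off. A hedged sketch of wiring such a callback into an mbedTLS configuration; it needs the mbedTLS headers and the helper name is illustrative:

#include <stdint.h>
#include <mbedtls/ssl.h>
#include <mbedtls/x509_crt.h>

/* clear all verification faults; only sane when the caller has
   explicitly disabled peer verification */
static int ignore_verify(void *ctx, mbedtls_x509_crt *crt,
                         int depth, uint32_t *flags)
{
  (void)ctx;
  (void)crt;
  (void)depth;
  *flags = 0;      /* "no problems found" */
  return 0;        /* do not abort the handshake */
}

/* illustrative helper: install the callback only when verification is off */
static void maybe_disable_verify(mbedtls_ssl_config *conf, int verifypeer)
{
  if(!verifypeer)
    mbedtls_ssl_conf_verify(conf, ignore_verify, NULL);
}

mbedtls_ssl_conf_verify() is the standard mbedTLS hook for per-certificate verification callbacks; returning non-zero from the callback would abort the handshake instead.
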
diff --git a/libs/libcurl/src/vtls/openssl.c b/libs/libcurl/src/vtls/openssl.c
index f53cb69471..7cc15350a4 100644
--- a/libs/libcurl/src/vtls/openssl.c
+++ b/libs/libcurl/src/vtls/openssl.c
@@ -254,13 +254,20 @@
#endif
#endif
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
+typedef size_t numcert_t;
+#else
+typedef int numcert_t;
+#endif
+#define ossl_valsize_t numcert_t
+
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L)
/* up2date versions of OpenSSL maintain reasonably secure defaults without
* breaking compatibility, so it is better not to override the defaults in curl
*/
#define DEFAULT_CIPHER_SELECTION NULL
#else
-/* ... but it is not the case with old versions of OpenSSL */
+/* not the case with old versions of OpenSSL */
#define DEFAULT_CIPHER_SELECTION \
"ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH"
#endif
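
DEFAULT_CIPHER_SELECTION above is only non-NULL for pre-1.1.0 OpenSSL, where curl still overrides the library defaults; newer releases are trusted to ship reasonable ones. A hedged sketch of how such a selection string reaches OpenSSL through the standard SSL_CTX_set_cipher_list() call (the helper itself is illustrative):

#include <openssl/ssl.h>

/* apply an explicit cipher selection only when one is configured;
   a NULL selection keeps the library defaults */
static int apply_cipher_selection(SSL_CTX *ctx, const char *selection)
{
  if(!selection)
    return 1;                                      /* keep defaults */
  return SSL_CTX_set_cipher_list(ctx, selection);  /* 1 on success */
}
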
@@ -307,14 +314,6 @@ typedef unsigned long sslerr_t;
#define USE_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L)
#endif /* !LIBRESSL_VERSION_NUMBER */
-#if defined(HAVE_SSL_X509_STORE_SHARE)
-struct multi_ssl_backend_data {
- char *CAfile; /* CAfile path used to generate X509 store */
- X509_STORE *store; /* cached X509 store or NULL if none */
- struct curltime time; /* when the cached store was created */
-};
-#endif /* HAVE_SSL_X509_STORE_SHARE */
-
#define push_certinfo(_label, _num) \
do { \
long info_len = BIO_get_mem_data(mem, &ptr); \
@@ -381,7 +380,7 @@ static void X509V3_ext(struct Curl_easy *data,
for(i = 0; i < (int)sk_X509_EXTENSION_num(exts); i++) {
ASN1_OBJECT *obj;
- X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, i);
+ X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, (ossl_valsize_t)i);
BUF_MEM *biomem;
char namebuf[128];
BIO *bio_out = BIO_new(BIO_s_mem());
@@ -403,12 +402,6 @@ static void X509V3_ext(struct Curl_easy *data,
}
}
-#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
-typedef size_t numcert_t;
-#else
-typedef int numcert_t;
-#endif
-
CURLcode Curl_ossl_certchain(struct Curl_easy *data, SSL *ssl)
{
CURLcode result;
@@ -438,7 +431,7 @@ CURLcode Curl_ossl_certchain(struct Curl_easy *data, SSL *ssl)
for(i = 0; i < (int)numcerts; i++) {
ASN1_INTEGER *num;
- X509 *x = sk_X509_value(sk, i);
+ X509 *x = sk_X509_value(sk, (ossl_valsize_t)i);
EVP_PKEY *pubkey = NULL;
int j;
char *ptr;
@@ -727,7 +720,10 @@ static int ossl_bio_cf_out_write(BIO *bio, const char *buf, int blen)
CURLcode result = CURLE_SEND_ERROR;
DEBUGASSERT(data);
- nwritten = Curl_conn_cf_send(cf->next, data, buf, blen, &result);
+ if(blen < 0)
+ return 0;
+
+ nwritten = Curl_conn_cf_send(cf->next, data, buf, (size_t)blen, &result);
CURL_TRC_CF(data, cf, "ossl_bio_cf_out_write(len=%d) -> %d, err=%d",
blen, (int)nwritten, result);
BIO_clear_retry_flags(bio);
@@ -752,8 +748,10 @@ static int ossl_bio_cf_in_read(BIO *bio, char *buf, int blen)
/* OpenSSL catches this case, so should we. */
if(!buf)
return 0;
+ if(blen < 0)
+ return 0;
- nread = Curl_conn_cf_recv(cf->next, data, buf, blen, &result);
+ nread = Curl_conn_cf_recv(cf->next, data, buf, (size_t)blen, &result);
CURL_TRC_CF(data, cf, "ossl_bio_cf_in_read(len=%d) -> %d, err=%d",
blen, (int)nread, result);
BIO_clear_retry_flags(bio);
@@ -844,7 +842,7 @@ static void ossl_keylog_callback(const SSL *ssl, const char *line)
#else
/*
* ossl_log_tls12_secret is called by libcurl to make the CLIENT_RANDOMs if the
- * OpenSSL being used doesn't have native support for doing that.
+ * OpenSSL being used does not have native support for doing that.
*/
static void
ossl_log_tls12_secret(const SSL *ssl, bool *keylog_done)
@@ -860,7 +858,7 @@ ossl_log_tls12_secret(const SSL *ssl, bool *keylog_done)
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \
!(defined(LIBRESSL_VERSION_NUMBER) && \
LIBRESSL_VERSION_NUMBER < 0x20700000L)
- /* ssl->s3 is not checked in openssl 1.1.0-pre6, but let's assume that
+ /* ssl->s3 is not checked in OpenSSL 1.1.0-pre6, but let's assume that
* we have a valid SSL context if we have a non-NULL session. */
SSL_get_client_random(ssl, client_random, SSL3_RANDOM_SIZE);
master_key_length = (int)
@@ -963,7 +961,7 @@ static int passwd_callback(char *buf, int num, int encrypting,
{
DEBUGASSERT(0 == encrypting);
- if(!encrypting) {
+ if(!encrypting && num >= 0) {
int klen = curlx_uztosi(strlen((char *)global_passwd));
if(num > klen) {
memcpy(buf, global_passwd, klen + 1);
@@ -1014,13 +1012,12 @@ static CURLcode ossl_seed(struct Curl_easy *data)
for(i = 0, i_max = len / sizeof(struct curltime); i < i_max; ++i) {
struct curltime tv = Curl_now();
Curl_wait_ms(1);
- tv.tv_sec *= i + 1;
- tv.tv_usec *= (unsigned int)i + 2;
- tv.tv_sec ^= ((Curl_now().tv_sec + Curl_now().tv_usec) *
- (i + 3)) << 8;
- tv.tv_usec ^= (unsigned int) ((Curl_now().tv_sec +
- Curl_now().tv_usec) *
- (i + 4)) << 16;
+ tv.tv_sec *= (time_t)i + 1;
+ tv.tv_usec *= (int)i + 2;
+ tv.tv_sec ^= ((Curl_now().tv_sec + (time_t)Curl_now().tv_usec) *
+ (time_t)(i + 3)) << 8;
+ tv.tv_usec ^= (int) ((Curl_now().tv_sec + (time_t)Curl_now().tv_usec) *
+ (time_t)(i + 4)) << 16;
memcpy(&randb[i * sizeof(struct curltime)], &tv,
sizeof(struct curltime));
}
@@ -1033,7 +1030,7 @@ static CURLcode ossl_seed(struct Curl_easy *data)
fname[0] = 0; /* blank it first */
RAND_file_name(fname, sizeof(fname));
if(fname[0]) {
- /* we got a file name to try */
+ /* we got a filename to try */
RAND_load_file(fname, RAND_LOAD_LENGTH);
if(rand_enough())
return CURLE_OK;
@@ -1369,7 +1366,7 @@ int cert_stuff(struct Curl_easy *data,
}
if(!params.cert) {
- failf(data, "ssl engine didn't initialized the certificate "
+      failf(data, "ssl engine did not initialize the certificate "
"properly.");
return 0;
}
@@ -1380,10 +1377,10 @@ int cert_stuff(struct Curl_easy *data,
sizeof(error_buffer)));
return 0;
}
- X509_free(params.cert); /* we don't need the handle any more... */
+ X509_free(params.cert); /* we do not need the handle any more... */
}
else {
- failf(data, "crypto engine not set, can't load certificate");
+ failf(data, "crypto engine not set, cannot load certificate");
return 0;
}
}
@@ -1479,7 +1476,7 @@ int cert_stuff(struct Curl_easy *data,
* Note that sk_X509_pop() is used below to make sure the cert is
* removed from the stack properly before getting passed to
* SSL_CTX_add_extra_chain_cert(), which takes ownership. Previously
- * we used sk_X509_value() instead, but then we'd clean it in the
+ * we used sk_X509_value() instead, but then we would clean it in the
* subsequent sk_X509_pop_free() call.
*/
X509 *x = sk_X509_pop(ca);
@@ -1572,10 +1569,10 @@ fail:
EVP_PKEY_free(priv_key);
return 0;
}
- EVP_PKEY_free(priv_key); /* we don't need the handle any more... */
+ EVP_PKEY_free(priv_key); /* we do not need the handle any more... */
}
else {
- failf(data, "crypto engine not set, can't load private key");
+ failf(data, "crypto engine not set, cannot load private key");
return 0;
}
}
@@ -1614,8 +1611,8 @@ fail:
#if !defined(OPENSSL_NO_RSA) && !defined(OPENSSL_IS_BORINGSSL) && \
!defined(OPENSSL_NO_DEPRECATED_3_0)
{
- /* If RSA is used, don't check the private key if its flags indicate
- * it doesn't support it. */
+ /* If RSA is used, do not check the private key if its flags indicate
+ * it does not support it. */
EVP_PKEY *priv_key = SSL_get_privatekey(ssl);
int pktype;
#ifdef HAVE_OPAQUE_EVP_PKEY
@@ -1681,7 +1678,7 @@ static int x509_name_oneline(X509_NAME *a, char *buf, size_t size)
if((size_t)biomem->length < size)
size = biomem->length;
else
- size--; /* don't overwrite the buffer end */
+ size--; /* do not overwrite the buffer end */
memcpy(buf, biomem->data, size);
buf[size] = 0;
@@ -1873,203 +1870,133 @@ static struct curl_slist *ossl_engines_list(struct Curl_easy *data)
return list;
}
-static void ossl_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+static CURLcode ossl_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
struct ossl_ctx *octx = (struct ossl_ctx *)connssl->backend;
+ CURLcode result = CURLE_OK;
+ char buf[1024];
+ int nread, err;
+ unsigned long sslerr;
+ size_t i;
- (void)data;
DEBUGASSERT(octx);
+ if(!octx->ssl || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
- if(octx->ssl) {
- /* Send the TLS shutdown if we are still connected *and* if
- * the peer did not already close the connection. */
- if(cf->next && cf->next->connected && !connssl->peer_closed) {
- char buf[1024];
- int nread, err;
- long sslerr;
-
- /* Maybe the server has already sent a close notify alert.
- Read it to avoid an RST on the TCP connection. */
- ERR_clear_error();
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+ if(!(SSL_get_shutdown(octx->ssl) & SSL_SENT_SHUTDOWN)) {
+ /* We have not started the shutdown from our side yet. Check
+ * if the server already sent us one. */
+ ERR_clear_error();
+ for(i = 0; i < 10; ++i) {
nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
- err = SSL_get_error(octx->ssl, nread);
- if(!nread && err == SSL_ERROR_ZERO_RETURN) {
- CURLcode result;
- ssize_t n;
- size_t blen = sizeof(buf);
- CURL_TRC_CF(data, cf, "peer has shutdown TLS");
- /* SSL_read() will not longer touch the socket, let's receive
- * directly from the next filter to see if the underlying
- * connection has also been closed. */
- n = Curl_conn_cf_recv(cf->next, data, buf, blen, &result);
- if(!n) {
- connssl->peer_closed = TRUE;
- CURL_TRC_CF(data, cf, "peer closed connection");
- }
- }
- ERR_clear_error();
- if(connssl->peer_closed) {
- /* As the peer closed, we do not expect it to read anything more we
- * may send. It may be harmful, leading to TCP RST and delaying
- * a lingering close. Just leave. */
- CURL_TRC_CF(data, cf, "not from sending TLS shutdown on "
- "connection closed by peer");
- }
- else if(SSL_shutdown(octx->ssl) == 1) {
- CURL_TRC_CF(data, cf, "SSL shutdown finished");
+ CURL_TRC_CF(data, cf, "SSL shutdown not sent, read -> %d", nread);
+ if(nread <= 0)
+ break;
+ }
+ err = SSL_get_error(octx->ssl, nread);
+ if(!nread && err == SSL_ERROR_ZERO_RETURN) {
+ bool input_pending;
+ /* Yes, it did. */
+ if(!send_shutdown) {
+ CURL_TRC_CF(data, cf, "SSL shutdown received, not sending");
+ *done = TRUE;
+ goto out;
}
- else {
- nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
- err = SSL_get_error(octx->ssl, nread);
- switch(err) {
- case SSL_ERROR_NONE: /* this is not an error */
- case SSL_ERROR_ZERO_RETURN: /* no more data */
- CURL_TRC_CF(data, cf, "SSL shutdown, EOF from server");
- break;
- case SSL_ERROR_WANT_READ:
- /* SSL has send its notify and now wants to read the reply
- * from the server. We are not really interested in that. */
- CURL_TRC_CF(data, cf, "SSL shutdown sent");
- break;
- case SSL_ERROR_WANT_WRITE:
- CURL_TRC_CF(data, cf, "SSL shutdown send blocked");
- break;
- default:
- sslerr = ERR_get_error();
- CURL_TRC_CF(data, cf, "SSL shutdown, error: '%s', errno %d",
- (sslerr ?
- ossl_strerror(sslerr, buf, sizeof(buf)) :
- SSL_ERROR_to_str(err)),
- SOCKERRNO);
- break;
- }
+ else if(!cf->next->cft->is_alive(cf->next, data, &input_pending)) {
+        /* Server closed the connection after its close notify. It
+         * does not seem interested in seeing our close notify, so do not
+         * send it. We are done. */
+ connssl->peer_closed = TRUE;
+ CURL_TRC_CF(data, cf, "peer closed connection");
+ *done = TRUE;
+ goto out;
}
-
- ERR_clear_error();
- SSL_set_connect_state(octx->ssl);
}
+ if(send_shutdown && SSL_shutdown(octx->ssl) == 1) {
+ CURL_TRC_CF(data, cf, "SSL shutdown finished");
+ *done = TRUE;
+ goto out;
+ }
+ }
- SSL_free(octx->ssl);
- octx->ssl = NULL;
+ /* SSL should now have started the shutdown from our side. Since it
+ * was not complete, we are lacking the close notify from the server. */
+ for(i = 0; i < 10; ++i) {
+ ERR_clear_error();
+ nread = SSL_read(octx->ssl, buf, (int)sizeof(buf));
+ CURL_TRC_CF(data, cf, "SSL shutdown read -> %d", nread);
+ if(nread <= 0)
+ break;
}
- if(octx->ssl_ctx) {
- SSL_CTX_free(octx->ssl_ctx);
- octx->ssl_ctx = NULL;
- octx->x509_store_setup = FALSE;
+ if(SSL_get_shutdown(octx->ssl) & SSL_RECEIVED_SHUTDOWN) {
+ CURL_TRC_CF(data, cf, "SSL shutdown received, finished");
+ *done = TRUE;
+ goto out;
}
- if(octx->bio_method) {
- ossl_bio_cf_method_free(octx->bio_method);
- octx->bio_method = NULL;
+ err = SSL_get_error(octx->ssl, nread);
+ switch(err) {
+ case SSL_ERROR_ZERO_RETURN: /* no more data */
+ CURL_TRC_CF(data, cf, "SSL shutdown not received, but closed");
+ *done = TRUE;
+ break;
+ case SSL_ERROR_NONE: /* just did not get anything */
+ case SSL_ERROR_WANT_READ:
+ /* SSL has sent its notify and now wants to read the reply
+ * from the server. We are not really interested in that. */
+ CURL_TRC_CF(data, cf, "SSL shutdown sent, want receive");
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ break;
+ case SSL_ERROR_WANT_WRITE:
+ CURL_TRC_CF(data, cf, "SSL shutdown send blocked");
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ break;
+ default:
+ /* Server seems to have closed the connection without sending us
+ * a close notify. */
+ sslerr = ERR_get_error();
+ CURL_TRC_CF(data, cf, "SSL shutdown, ignore recv error: '%s', errno %d",
+ (sslerr ?
+ ossl_strerror(sslerr, buf, sizeof(buf)) :
+ SSL_ERROR_to_str(err)),
+ SOCKERRNO);
+ *done = TRUE;
+ result = CURLE_OK;
+ break;
}
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
}
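
For orientation, the non-blocking shutdown flow that the rewritten ossl_shutdown() follows can be sketched in isolation against the public OpenSSL API. This is a simplified, hypothetical illustration rather than curl's code: the connection-filter plumbing, tracing and io_need bookkeeping are omitted, and the names tls_shutdown_step and want_io are invented for the example.

#include <openssl/ssl.h>
#include <openssl/err.h>

enum want_io { WANT_NONE = 0, WANT_RECV, WANT_SEND };

/* Sketch: one non-blocking step of a bidirectional TLS shutdown.
 * Returns 1 when the shutdown is complete, 0 when the caller should wait
 * for the indicated I/O direction and call again, -1 on a hard error. */
static int tls_shutdown_step(SSL *ssl, int send_shutdown, enum want_io *want)
{
  char buf[1024];
  int nread, err, i;

  *want = WANT_NONE;

  /* Send our close notify once, unless told to only wait for the peer's. */
  if(send_shutdown && !(SSL_get_shutdown(ssl) & SSL_SENT_SHUTDOWN)) {
    ERR_clear_error();
    if(SSL_shutdown(ssl) == 1)
      return 1; /* close notify already exchanged in both directions */
  }

  /* Drain incoming records, looking for the peer's close notify. */
  for(i = 0; i < 10; ++i) {
    ERR_clear_error();
    nread = SSL_read(ssl, buf, (int)sizeof(buf));
    if(nread <= 0)
      break;
  }
  if(SSL_get_shutdown(ssl) & SSL_RECEIVED_SHUTDOWN)
    return 1; /* the peer's close notify has arrived */

  err = SSL_get_error(ssl, nread);
  switch(err) {
  case SSL_ERROR_ZERO_RETURN: /* closed without a close notify */
    return 1;
  case SSL_ERROR_NONE:
  case SSL_ERROR_WANT_READ:
    *want = WANT_RECV; /* wait until the socket is readable, then retry */
    return 0;
  case SSL_ERROR_WANT_WRITE:
    *want = WANT_SEND; /* our close notify is blocked on sending */
    return 0;
  default:
    return -1; /* treat anything else as a hard error */
  }
}

A caller would poll the socket for the reported direction and invoke the step again, which mirrors how the filter above reports CURL_SSL_IO_NEED_RECV/CURL_SSL_IO_NEED_SEND to its caller.
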
-/*
- * This function is called to shut down the SSL layer but keep the
- * socket open (CCC - Clear Command Channel)
- */
-static int ossl_shutdown(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static void ossl_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
- int retval = 0;
struct ssl_connect_data *connssl = cf->ctx;
- char buf[256]; /* We will use this for the OpenSSL error buffer, so it has
- to be at least 256 bytes long. */
- unsigned long sslerror;
- int nread;
- int buffsize;
- int err;
- bool done = FALSE;
struct ossl_ctx *octx = (struct ossl_ctx *)connssl->backend;
- int loop = 10;
+ (void)data;
DEBUGASSERT(octx);
-#ifndef CURL_DISABLE_FTP
- /* This has only been tested on the proftpd server, and the mod_tls code
- sends a close notify alert without waiting for a close notify alert in
- response. Thus we wait for a close notify alert from the server, but
- we do not send one. Let's hope other servers do the same... */
-
- if(data->set.ftp_ccc == CURLFTPSSL_CCC_ACTIVE)
- (void)SSL_shutdown(octx->ssl);
-#endif
-
if(octx->ssl) {
- buffsize = (int)sizeof(buf);
- while(!done && loop--) {
- int what = SOCKET_READABLE(Curl_conn_cf_get_socket(cf, data),
- SSL_SHUTDOWN_TIMEOUT);
- if(what > 0) {
- ERR_clear_error();
-
- /* Something to read, let's do it and hope that it is the close
- notify alert from the server */
- nread = SSL_read(octx->ssl, buf, buffsize);
- err = SSL_get_error(octx->ssl, nread);
-
- switch(err) {
- case SSL_ERROR_NONE: /* this is not an error */
- case SSL_ERROR_ZERO_RETURN: /* no more data */
- /* This is the expected response. There was no data but only
- the close notify alert */
- done = TRUE;
- break;
- case SSL_ERROR_WANT_READ:
- /* there's data pending, re-invoke SSL_read() */
- infof(data, "SSL_ERROR_WANT_READ");
- break;
- case SSL_ERROR_WANT_WRITE:
- /* SSL wants a write. Really odd. Let's bail out. */
- infof(data, "SSL_ERROR_WANT_WRITE");
- done = TRUE;
- break;
- default:
- /* openssl/ssl.h says "look at error stack/return value/errno" */
- sslerror = ERR_get_error();
- failf(data, OSSL_PACKAGE " SSL_read on shutdown: %s, errno %d",
- (sslerror ?
- ossl_strerror(sslerror, buf, sizeof(buf)) :
- SSL_ERROR_to_str(err)),
- SOCKERRNO);
- done = TRUE;
- break;
- }
- }
- else if(0 == what) {
- /* timeout */
- failf(data, "SSL shutdown timeout");
- done = TRUE;
- }
- else {
- /* anything that gets here is fatally bad */
- failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
- retval = -1;
- done = TRUE;
- }
- } /* while()-loop for the select() */
-
- if(data->set.verbose) {
-#ifdef HAVE_SSL_GET_SHUTDOWN
- switch(SSL_get_shutdown(octx->ssl)) {
- case SSL_SENT_SHUTDOWN:
- infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN");
- break;
- case SSL_RECEIVED_SHUTDOWN:
- infof(data, "SSL_get_shutdown() returned SSL_RECEIVED_SHUTDOWN");
- break;
- case SSL_SENT_SHUTDOWN|SSL_RECEIVED_SHUTDOWN:
- infof(data, "SSL_get_shutdown() returned SSL_SENT_SHUTDOWN|"
- "SSL_RECEIVED__SHUTDOWN");
- break;
- }
-#endif
- }
-
SSL_free(octx->ssl);
octx->ssl = NULL;
}
- return retval;
+ if(octx->ssl_ctx) {
+ SSL_CTX_free(octx->ssl_ctx);
+ octx->ssl_ctx = NULL;
+ octx->x509_store_setup = FALSE;
+ }
+ if(octx->bio_method) {
+ ossl_bio_cf_method_free(octx->bio_method);
+ octx->bio_method = NULL;
+ }
}
static void ossl_session_free(void *sessionid, size_t idsize)
@@ -2107,7 +2034,7 @@ static void ossl_close_all(struct Curl_easy *data)
/* ====================================================== */
/*
- * Match subjectAltName against the host name.
+ * Match subjectAltName against the hostname.
*/
static bool subj_alt_hostcheck(struct Curl_easy *data,
const char *match_pattern,
@@ -2137,7 +2064,7 @@ static bool subj_alt_hostcheck(struct Curl_easy *data,
Certification Authorities are encouraged to use the dNSName instead.
Matching is performed using the matching rules specified by
- [RFC2459]. If more than one identity of a given type is present in
+ [RFC2459]. If more than one identity of a given type is present in
the certificate (e.g., more than one dNSName name, a match in any one
of the set is considered acceptable.) Names may contain the wildcard
character * which is considered to match any single domain name
@@ -2208,7 +2135,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
bool ipmatched = FALSE;
/* get amount of alternatives, RFC2459 claims there MUST be at least
- one, but we don't depend on it... */
+ one, but we do not depend on it... */
numalts = sk_GENERAL_NAME_num(altnames);
/* loop through all alternatives - until a dnsmatch */
@@ -2229,7 +2156,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
switch(target) {
case GEN_DNS: /* name/pattern comparison */
- /* The OpenSSL man page explicitly says: "In general it cannot be
+ /* The OpenSSL manpage explicitly says: "In general it cannot be
assumed that the data returned by ASN1_STRING_data() is null
terminated or does not contain embedded nulls." But also that
"The actual format of the data will depend on the actual string
@@ -2239,7 +2166,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
is always null-terminated.
*/
if((altlen == strlen(altptr)) &&
- /* if this isn't true, there was an embedded zero in the name
+ /* if this is not true, there was an embedded zero in the name
string and we cannot match it. */
subj_alt_hostcheck(data, altptr, altlen,
peer->hostname, hostlen,
@@ -2271,7 +2198,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
/* an alternative name matched */
;
else if(dNSName || iPAddress) {
- const char *tname = (peer->type == CURL_SSL_PEER_DNS) ? "host name" :
+ const char *tname = (peer->type == CURL_SSL_PEER_DNS) ? "hostname" :
(peer->type == CURL_SSL_PEER_IPV4) ?
"ipv4 address" : "ipv6 address";
infof(data, " subjectAltName does not match %s %s", tname, peer->dispname);
@@ -2342,7 +2269,7 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
else if(!Curl_cert_hostcheck((const char *)peer_CN,
peerlen, peer->hostname, hostlen)) {
failf(data, "SSL: certificate subject name '%s' does not match "
- "target host name '%s'", peer_CN, peer->dispname);
+ "target hostname '%s'", peer_CN, peer->dispname);
result = CURLE_PEER_FAILED_VERIFICATION;
}
else {
@@ -2358,9 +2285,9 @@ CURLcode Curl_ossl_verifyhost(struct Curl_easy *data, struct connectdata *conn,
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
!defined(OPENSSL_NO_OCSP)
static CURLcode verifystatus(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+ struct Curl_easy *data,
+ struct ossl_ctx *octx)
{
- struct ssl_connect_data *connssl = cf->ctx;
int i, ocsp_status;
#if defined(OPENSSL_IS_AWSLC)
const uint8_t *status;
@@ -2373,7 +2300,6 @@ static CURLcode verifystatus(struct Curl_cfilter *cf,
OCSP_BASICRESP *br = NULL;
X509_STORE *st = NULL;
STACK_OF(X509) *ch = NULL;
- struct ossl_ctx *octx = (struct ossl_ctx *)connssl->backend;
X509 *cert;
OCSP_CERTID *id = NULL;
int cert_status, crl_reason;
@@ -2381,9 +2307,10 @@ static CURLcode verifystatus(struct Curl_cfilter *cf,
int ret;
long len;
+ (void)cf;
DEBUGASSERT(octx);
- len = SSL_get_tlsext_status_ocsp_resp(octx->ssl, &status);
+ len = (long)SSL_get_tlsext_status_ocsp_resp(octx->ssl, &status);
if(!status) {
failf(data, "No OCSP response received");
@@ -2425,8 +2352,8 @@ static CURLcode verifystatus(struct Curl_cfilter *cf,
(defined(LIBRESSL_VERSION_NUMBER) && \
LIBRESSL_VERSION_NUMBER <= 0x2040200fL))
/* The authorized responder cert in the OCSP response MUST be signed by the
- peer cert's issuer (see RFC6960 section 4.2.2.2). If that's a root cert,
- no problem, but if it's an intermediate cert OpenSSL has a bug where it
+ peer cert's issuer (see RFC6960 section 4.2.2.2). If that is a root cert,
+ no problem, but if it is an intermediate cert OpenSSL has a bug where it
expects this issuer to be present in the chain embedded in the OCSP
response. So we add it if necessary. */
@@ -2464,7 +2391,7 @@ static CURLcode verifystatus(struct Curl_cfilter *cf,
}
for(i = 0; i < (int)sk_X509_num(ch); i++) {
- X509 *issuer = sk_X509_value(ch, i);
+ X509 *issuer = sk_X509_value(ch, (ossl_valsize_t)i);
if(X509_check_issued(issuer, cert) == X509_V_OK) {
id = OCSP_cert_to_id(EVP_sha1(), cert, issuer);
break;
@@ -2525,7 +2452,7 @@ end:
#endif /* USE_OPENSSL */
-/* The SSL_CTRL_SET_MSG_CALLBACK doesn't exist in ancient OpenSSL versions
+/* The SSL_CTRL_SET_MSG_CALLBACK does not exist in ancient OpenSSL versions
and thus this cannot be done there. */
#ifdef SSL_CTRL_SET_MSG_CALLBACK
@@ -2710,7 +2637,7 @@ static void ossl_trace(int direction, int ssl_ver, int content_type,
ssl_ver >>= 8; /* check the upper 8 bits only below */
- /* SSLv2 doesn't seem to have TLS record-type headers, so OpenSSL
+ /* SSLv2 does not seem to have TLS record-type headers, so OpenSSL
* always pass-up content-type as 0. But the interesting message-type
* is at 'buf[0]'.
*/
@@ -2797,7 +2724,7 @@ ossl_set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx)
}
/* CURL_SSLVERSION_DEFAULT means that no option was selected.
- We don't want to pass 0 to SSL_CTX_set_min_proto_version as
+ We do not want to pass 0 to SSL_CTX_set_min_proto_version as
it would enable all versions down to the lowest supported by
the library.
So we skip this, and stay with the library default
@@ -2809,7 +2736,7 @@ ossl_set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx)
}
/* ... then, TLS max version */
- curl_ssl_version_max = conn_config->version_max;
+ curl_ssl_version_max = (long)conn_config->version_max;
/* convert curl max SSL version option to OpenSSL constant */
switch(curl_ssl_version_max) {
@@ -2850,6 +2777,9 @@ ossl_set_ssl_version_min_max(struct Curl_cfilter *cf, SSL_CTX *ctx)
typedef uint32_t ctx_option_t;
#elif OPENSSL_VERSION_NUMBER >= 0x30000000L
typedef uint64_t ctx_option_t;
+#elif OPENSSL_VERSION_NUMBER >= 0x10100000L && \
+ !defined(LIBRESSL_VERSION_NUMBER)
+typedef unsigned long ctx_option_t;
#else
typedef long ctx_option_t;
#endif
@@ -2857,14 +2787,14 @@ typedef long ctx_option_t;
#if (OPENSSL_VERSION_NUMBER < 0x10100000L) /* 1.1.0 */
static CURLcode
ossl_set_ssl_version_min_max_legacy(ctx_option_t *ctx_options,
- struct Curl_cfilter *cf,
- struct Curl_easy *data)
+ struct Curl_cfilter *cf,
+ struct Curl_easy *data)
{
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
long ssl_version = conn_config->version;
long ssl_version_max = conn_config->version_max;
- (void) data; /* In case it's unused. */
+ (void) data; /* In case it is unused. */
switch(ssl_version) {
case CURL_SSLVERSION_TLSv1_3:
@@ -2937,42 +2867,25 @@ CURLcode Curl_ossl_add_session(struct Curl_cfilter *cf,
SSL_SESSION *session)
{
const struct ssl_config_data *config;
- bool isproxy;
- bool added = FALSE;
+ CURLcode result = CURLE_OK;
if(!cf || !data)
goto out;
- isproxy = Curl_ssl_cf_is_proxy(cf);
-
config = Curl_ssl_cf_get_config(cf, data);
- if(config->primary.sessionid) {
- bool incache;
- void *old_session = NULL;
+ if(config->primary.cache_session) {
Curl_ssl_sessionid_lock(data);
- if(isproxy)
- incache = FALSE;
- else
- incache = !(Curl_ssl_getsessionid(cf, data, peer,
- &old_session, NULL));
- if(incache && (old_session != session)) {
- infof(data, "old SSL session ID is stale, removing");
- Curl_ssl_delsessionid(data, old_session);
- incache = FALSE;
- }
-
- if(!incache) {
- added = TRUE;
- Curl_ssl_addsessionid(cf, data, peer, session, 0, ossl_session_free);
- }
+ result = Curl_ssl_set_sessionid(cf, data, peer, session, 0,
+ ossl_session_free);
+ session = NULL; /* call has taken ownership */
Curl_ssl_sessionid_unlock(data);
}
out:
- if(!added)
+ if(session)
ossl_session_free(session, 0);
- return CURLE_OK;
+ return result;
}
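
As the "call has taken ownership" comment above notes, Curl_ssl_set_sessionid() takes ownership of the session even when it returns an error, so the caller clears its pointer and the out: path frees the session only if the handoff never happened. A minimal standalone sketch of that ownership convention, with made-up names (struct cache, put_owned, store_session) rather than curl's API:

#include <stdlib.h>

typedef void obj_dtor(void *obj);

struct cache {
  void *slot;      /* single-entry cache, just for the sketch */
  obj_dtor *dtor;
};

/* Takes ownership of `obj` regardless of what it returns. */
static int put_owned(struct cache *c, void *obj, obj_dtor *dtor)
{
  if(c->slot)
    c->dtor(c->slot); /* evict the previous entry */
  c->slot = obj;
  c->dtor = dtor;
  return 0;
}

static int store_session(struct cache *c, void *session, obj_dtor *dtor,
                         int caching_enabled)
{
  int result = 0;
  if(!c || !session)
    goto out;
  if(caching_enabled) {
    result = put_owned(c, session, dtor);
    session = NULL; /* the call has taken ownership, even on failure */
  }
out:
  if(session)
    dtor(session); /* never handed off: release it here */
  return result;
}
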
/* The "new session" callback must return zero if the session can be removed
@@ -3017,7 +2930,7 @@ static CURLcode load_cacert_from_memory(X509_STORE *store,
/* add each entry from PEM file to x509_store */
for(i = 0; i < (int)sk_X509_INFO_num(inf); ++i) {
- itmp = sk_X509_INFO_value(inf, i);
+ itmp = sk_X509_INFO_value(inf, (ossl_valsize_t)i);
if(itmp->x509) {
if(X509_STORE_add_cert(store, itmp->x509)) {
++count;
@@ -3043,7 +2956,7 @@ static CURLcode load_cacert_from_memory(X509_STORE *store,
sk_X509_INFO_pop_free(inf, X509_INFO_free);
BIO_free(cbio);
- /* if we didn't end up importing anything, treat that as an error */
+ /* if we did not end up importing anything, treat that as an error */
return (count > 0) ? CURLE_OK : CURLE_SSL_CACERT_BADFILE;
}
@@ -3164,7 +3077,7 @@ static CURLcode import_windows_cert_store(struct Curl_easy *data,
else
continue;
- x509 = d2i_X509(NULL, &encoded_cert, pContext->cbCertEncoded);
+ x509 = d2i_X509(NULL, &encoded_cert, (long)pContext->cbCertEncoded);
if(!x509)
continue;
@@ -3302,8 +3215,8 @@ static CURLcode populate_x509_store(struct Curl_cfilter *cf,
#ifdef CURL_CA_FALLBACK
if(!ssl_cafile && !ssl_capath &&
!imported_native_ca && !imported_ca_info_blob) {
- /* verifying the peer without any CA certificates won't
- work so use openssl's built-in default as fallback */
+ /* verifying the peer without any CA certificates will not
+ work so use OpenSSL's built-in default as fallback */
X509_STORE_set_default_paths(store);
}
#endif
@@ -3328,7 +3241,7 @@ static CURLcode populate_x509_store(struct Curl_cfilter *cf,
if(verifypeer) {
/* Try building a chain using issuers in the trusted store first to avoid
- problems with server-sent legacy intermediates. Newer versions of
+ problems with server-sent legacy intermediates. Newer versions of
OpenSSL do alternate chain checking by default but we do not know how to
determine that in a reliable manner.
https://rt.openssl.org/Ticket/Display.html?id=3621&user=guest&pass=guest
@@ -3355,23 +3268,49 @@ static CURLcode populate_x509_store(struct Curl_cfilter *cf,
}
#if defined(HAVE_SSL_X509_STORE_SHARE)
-static bool cached_x509_store_expired(const struct Curl_easy *data,
- const struct multi_ssl_backend_data *mb)
+
+/* key to use at `multi->proto_hash` */
+#define MPROTO_OSSL_X509_KEY "tls:ossl:x509:share"
+
+struct ossl_x509_share {
+ char *CAfile; /* CAfile path used to generate X509 store */
+ X509_STORE *store; /* cached X509 store or NULL if none */
+ struct curltime time; /* when the cached store was created */
+};
+
+static void oss_x509_share_free(void *key, size_t key_len, void *p)
{
- const struct ssl_general_config *cfg = &data->set.general_ssl;
- struct curltime now = Curl_now();
- timediff_t elapsed_ms = Curl_timediff(now, mb->time);
- timediff_t timeout_ms = cfg->ca_cache_timeout * (timediff_t)1000;
+ struct ossl_x509_share *share = p;
+ DEBUGASSERT(key_len == (sizeof(MPROTO_OSSL_X509_KEY)-1));
+ DEBUGASSERT(!memcmp(MPROTO_OSSL_X509_KEY, key, key_len));
+ (void)key;
+ (void)key_len;
+ if(share->store) {
+ X509_STORE_free(share->store);
+ }
+ free(share->CAfile);
+ free(share);
+}
- if(timeout_ms < 0)
- return false;
+static bool
+cached_x509_store_expired(const struct Curl_easy *data,
+ const struct ossl_x509_share *mb)
+{
+ const struct ssl_general_config *cfg = &data->set.general_ssl;
+ if(cfg->ca_cache_timeout < 0)
+ return FALSE;
+ else {
+ struct curltime now = Curl_now();
+ timediff_t elapsed_ms = Curl_timediff(now, mb->time);
+ timediff_t timeout_ms = cfg->ca_cache_timeout * (timediff_t)1000;
- return elapsed_ms >= timeout_ms;
+ return elapsed_ms >= timeout_ms;
+ }
}
-static bool cached_x509_store_different(
- struct Curl_cfilter *cf,
- const struct multi_ssl_backend_data *mb)
+static bool
+cached_x509_store_different(struct Curl_cfilter *cf,
+ const struct ossl_x509_share *mb)
{
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
if(!mb->CAfile || !conn_config->CAfile)
@@ -3384,15 +3323,17 @@ static X509_STORE *get_cached_x509_store(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
struct Curl_multi *multi = data->multi;
+ struct ossl_x509_share *share;
X509_STORE *store = NULL;
DEBUGASSERT(multi);
- if(multi &&
- multi->ssl_backend_data &&
- multi->ssl_backend_data->store &&
- !cached_x509_store_expired(data, multi->ssl_backend_data) &&
- !cached_x509_store_different(cf, multi->ssl_backend_data)) {
- store = multi->ssl_backend_data->store;
+ share = multi? Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_OSSL_X509_KEY,
+ sizeof(MPROTO_OSSL_X509_KEY)-1) : NULL;
+ if(share && share->store &&
+ !cached_x509_store_expired(data, share) &&
+ !cached_x509_store_different(cf, share)) {
+ store = share->store;
}
return store;
@@ -3404,20 +3345,28 @@ static void set_cached_x509_store(struct Curl_cfilter *cf,
{
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
struct Curl_multi *multi = data->multi;
- struct multi_ssl_backend_data *mbackend;
+ struct ossl_x509_share *share;
DEBUGASSERT(multi);
if(!multi)
return;
+ share = Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_OSSL_X509_KEY,
+ sizeof(MPROTO_OSSL_X509_KEY)-1);
- if(!multi->ssl_backend_data) {
- multi->ssl_backend_data = calloc(1, sizeof(struct multi_ssl_backend_data));
- if(!multi->ssl_backend_data)
+ if(!share) {
+ share = calloc(1, sizeof(*share));
+ if(!share)
return;
+ if(!Curl_hash_add2(&multi->proto_hash,
+ (void *)MPROTO_OSSL_X509_KEY,
+ sizeof(MPROTO_OSSL_X509_KEY)-1,
+ share, oss_x509_share_free)) {
+ free(share);
+ return;
+ }
}
- mbackend = multi->ssl_backend_data;
-
if(X509_STORE_up_ref(store)) {
char *CAfile = NULL;
@@ -3429,14 +3378,14 @@ static void set_cached_x509_store(struct Curl_cfilter *cf,
}
}
- if(mbackend->store) {
- X509_STORE_free(mbackend->store);
- free(mbackend->CAfile);
+ if(share->store) {
+ X509_STORE_free(share->store);
+ free(share->CAfile);
}
- mbackend->time = Curl_now();
- mbackend->store = store;
- mbackend->CAfile = CAfile;
+ share->time = Curl_now();
+ share->store = store;
+ share->CAfile = CAfile;
}
}
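
The cached X509 store now lives in multi->proto_hash under the fixed MPROTO_OSSL_X509_KEY key, and reuse is decided by the CAfile the store was built from plus an age check against ca_cache_timeout. Stripped of the curl hash plumbing, that reuse-or-replace logic is roughly the following standalone sketch; the struct and helper names (x509_share, share_get, share_set) are invented here, and only the X509_STORE refcounting calls are real OpenSSL API:

#include <string.h>
#include <time.h>
#include <openssl/x509_vfy.h>

struct x509_share {
  char CAfile[256];   /* CAfile used to build the cached store */
  X509_STORE *store;  /* cached store, refcounted by OpenSSL */
  time_t created;     /* when the cached store was built */
};

/* Return the cached store if it was built from the same CAfile and is not
 * older than timeout_sec (a negative timeout means it never expires). */
static X509_STORE *share_get(struct x509_share *sh, const char *CAfile,
                             long timeout_sec)
{
  if(!sh->store)
    return NULL;
  if(CAfile && strcmp(CAfile, sh->CAfile))
    return NULL; /* built from a different CAfile */
  if(timeout_sec >= 0 && difftime(time(NULL), sh->created) >= timeout_sec)
    return NULL; /* cached store has expired */
  return sh->store;
}

/* Replace the cached store; the share keeps its own reference. */
static void share_set(struct x509_share *sh, const char *CAfile,
                      X509_STORE *store)
{
  if(!X509_STORE_up_ref(store))
    return;
  if(sh->store)
    X509_STORE_free(sh->store);
  sh->store = store;
  sh->created = time(NULL);
  sh->CAfile[0] = '\0';
  if(CAfile) {
    strncpy(sh->CAfile, CAfile, sizeof(sh->CAfile) - 1);
    sh->CAfile[sizeof(sh->CAfile) - 1] = '\0';
  }
}
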
@@ -3451,7 +3400,7 @@ CURLcode Curl_ssl_setup_x509_store(struct Curl_cfilter *cf,
bool cache_criteria_met;
/* Consider the X509 store cacheable if it comes exclusively from a CAfile,
- or no source is provided and we are falling back to openssl's built-in
+ or no source is provided and we are falling back to OpenSSL's built-in
default. */
cache_criteria_met = (data->set.general_ssl.ca_cache_timeout != 0) &&
conn_config->verifypeer &&
@@ -3504,15 +3453,12 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
void *ssl_sessionid = NULL;
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
- const long int ssl_version = conn_config->version;
+ const long int ssl_version_min = conn_config->version;
char * const ssl_cert = ssl_config->primary.clientcert;
const struct curl_blob *ssl_cert_blob = ssl_config->primary.cert_blob;
const char * const ssl_cert_type = ssl_config->cert_type;
const bool verifypeer = conn_config->verifypeer;
char error_buffer[256];
-#ifdef USE_ECH
- struct ssl_connect_data *connssl = cf->ctx;
-#endif
/* Make funny stuff to get random input */
result = ossl_seed(data);
@@ -3523,8 +3469,8 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
switch(transport) {
case TRNSPRT_TCP:
- /* check to see if we've been told to use an explicit SSL/TLS version */
- switch(ssl_version) {
+ /* check to see if we have been told to use an explicit SSL/TLS version */
+ switch(ssl_version_min) {
case CURL_SSLVERSION_DEFAULT:
case CURL_SSLVERSION_TLSv1:
case CURL_SSLVERSION_TLSv1_0:
@@ -3550,11 +3496,12 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
}
break;
case TRNSPRT_QUIC:
- if((ssl_version != CURL_SSLVERSION_DEFAULT) &&
- (ssl_version < CURL_SSLVERSION_TLSv1_3)) {
+ if(conn_config->version_max &&
+ (conn_config->version_max != CURL_SSLVERSION_MAX_TLSv1_3)) {
failf(data, "QUIC needs at least TLS version 1.3");
return CURLE_SSL_CONNECT_ERROR;
- }
+ }
+
#ifdef USE_OPENSSL_QUIC
req_method = OSSL_QUIC_client_method();
#elif (OPENSSL_VERSION_NUMBER >= 0x10100000L)
@@ -3573,7 +3520,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
octx->ssl_ctx = SSL_CTX_new(req_method);
if(!octx->ssl_ctx) {
- failf(data, "SSL: couldn't create a context: %s",
+ failf(data, "SSL: could not create a context: %s",
ossl_strerror(ERR_peek_error(), error_buffer, sizeof(error_buffer)));
return CURLE_OUT_OF_MEMORY;
}
@@ -3594,12 +3541,12 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
/* OpenSSL contains code to work around lots of bugs and flaws in various
SSL-implementations. SSL_CTX_set_options() is used to enabled those
- work-arounds. The man page for this option states that SSL_OP_ALL enables
+ work-arounds. The manpage for this option states that SSL_OP_ALL enables
all the work-arounds and that "It is usually safe to use SSL_OP_ALL to
enable the bug workaround options if compatibility with somewhat broken
implementations is desired."
- The "-no_ticket" option was introduced in OpenSSL 0.9.8j. It's a flag to
+ The "-no_ticket" option was introduced in OpenSSL 0.9.8j. It is a flag to
disable "rfc4507bis session ticket support". rfc4507bis was later turned
into the proper RFC5077: https://datatracker.ietf.org/doc/html/rfc5077
@@ -3640,17 +3587,17 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
#ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
/* mitigate CVE-2010-4180 */
- ctx_options &= ~SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG;
+ ctx_options &= ~(ctx_option_t)SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG;
#endif
#ifdef SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
/* unless the user explicitly asks to allow the protocol vulnerability we
use the work-around */
if(!ssl_config->enable_beast)
- ctx_options &= ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;
+ ctx_options &= ~(ctx_option_t)SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;
#endif
- switch(ssl_version) {
+ switch(ssl_version_min) {
case CURL_SSLVERSION_SSLv2:
case CURL_SSLVERSION_SSLv3:
return CURLE_NOT_BUILT_IN;
@@ -3752,7 +3699,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
infof(data, "Using TLS-SRP username: %s", ssl_username);
if(!SSL_CTX_set_srp_username(octx->ssl_ctx, ssl_username)) {
- failf(data, "Unable to set SRP user name");
+ failf(data, "Unable to set SRP username");
return CURLE_BAD_FUNCTION_ARGUMENT;
}
if(!SSL_CTX_set_srp_password(octx->ssl_ctx, ssl_password)) {
@@ -3785,7 +3732,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
#endif
if(cb_new_session) {
- /* Enable the session cache because it's a prerequisite for the
+ /* Enable the session cache because it is a prerequisite for the
* "new session" callback. Use the "external storage" mode to prevent
* OpenSSL from creating an internal session cache.
*/
@@ -3821,7 +3768,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
SSL_free(octx->ssl);
octx->ssl = SSL_new(octx->ssl_ctx);
if(!octx->ssl) {
- failf(data, "SSL: couldn't create a context (handle)");
+ failf(data, "SSL: could not create a context (handle)");
return CURLE_OUT_OF_MEMORY;
}
@@ -3876,7 +3823,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
ech_config_len = 2 * strlen(b64);
result = Curl_base64_decode(b64, &ech_config, &ech_config_len);
if(result || !ech_config) {
- infof(data, "ECH: can't base64 decode ECHConfig from command line");
+ infof(data, "ECH: cannot base64 decode ECHConfig from command line");
if(data->set.tls_ech & CURLECH_HARD)
return result;
}
@@ -3910,7 +3857,8 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
else {
struct Curl_dns_entry *dns = NULL;
- dns = Curl_fetch_addr(data, connssl->peer.hostname, connssl->peer.port);
+ if(peer->hostname)
+ dns = Curl_fetch_addr(data, peer->hostname, peer->port);
if(!dns) {
infof(data, "ECH: requested but no DNS info available");
if(data->set.tls_ech & CURLECH_HARD)
@@ -3940,7 +3888,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
# endif
else {
trying_ech_now = 1;
- infof(data, "ECH: imported ECHConfigList of length %ld", elen);
+ infof(data, "ECH: imported ECHConfigList of length %zu", elen);
}
}
else {
@@ -3953,15 +3901,15 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
}
# ifdef OPENSSL_IS_BORINGSSL
if(trying_ech_now && outername) {
- infof(data, "ECH: setting public_name not supported with boringssl");
+ infof(data, "ECH: setting public_name not supported with BoringSSL");
return CURLE_SSL_CONNECT_ERROR;
}
# else
if(trying_ech_now && outername) {
infof(data, "ECH: inner: '%s', outer: '%s'",
- connssl->peer.hostname, outername);
+ peer->hostname ? peer->hostname : "NULL", outername);
result = SSL_ech_set_server_names(octx->ssl,
- connssl->peer.hostname, outername,
+ peer->hostname, outername,
0 /* do send outer */);
if(result != 1) {
infof(data, "ECH: rv failed to set server name(s) %d [ERROR]", result);
@@ -3971,7 +3919,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
# endif /* not BORING */
if(trying_ech_now
&& SSL_set_min_proto_version(octx->ssl, TLS1_3_VERSION) != 1) {
- infof(data, "ECH: Can't force TLSv1.3 [ERROR]");
+ infof(data, "ECH: cannot force TLSv1.3 [ERROR]");
return CURLE_SSL_CONNECT_ERROR;
}
}
@@ -3980,7 +3928,7 @@ CURLcode Curl_ossl_ctx_init(struct ossl_ctx *octx,
#endif
octx->reused_session = FALSE;
- if(ssl_config->primary.sessionid && transport == TRNSPRT_TCP) {
+ if(ssl_config->primary.cache_session && transport == TRNSPRT_TCP) {
Curl_ssl_sessionid_lock(data);
if(!Curl_ssl_getsessionid(cf, data, peer, &ssl_sessionid, NULL)) {
/* we got a session id, use it! */
@@ -4041,7 +3989,7 @@ static CURLcode ossl_connect_step1(struct Curl_cfilter *cf,
/* with OpenSSL v1.1.1 we get an alternative to SSL_set_bio() that works
* without backward compat quirks. Every call takes one reference, so we
* up it and pass. SSL* then owns it and will free.
- * We check on the function in configure, since libressl and friends
+ * We check on the function in configure, since LibreSSL and friends
* each have their own versions to add support for this. */
BIO_up_ref(bio);
SSL_set0_rbio(octx->ssl, bio);
@@ -4131,11 +4079,10 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
struct ssl_connect_data *connssl = cf->ctx;
struct ossl_ctx *octx = (struct ossl_ctx *)connssl->backend;
struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
- DEBUGASSERT(ssl_connect_2 == connssl->connecting_state
- || ssl_connect_2_reading == connssl->connecting_state
- || ssl_connect_2_writing == connssl->connecting_state);
+ DEBUGASSERT(ssl_connect_2 == connssl->connecting_state);
DEBUGASSERT(octx);
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
ERR_clear_error();
err = SSL_connect(octx->ssl);
@@ -4150,14 +4097,11 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
}
#ifndef HAVE_KEYLOG_CALLBACK
- if(Curl_tls_keylog_enabled()) {
- /* If key logging is enabled, wait for the handshake to complete and then
- * proceed with logging secrets (for TLS 1.2 or older).
- */
- bool done = FALSE;
- ossl_log_tls12_secret(octx->ssl, &done);
- octx->keylog_done = done;
- }
+ /* If key logging is enabled, wait for the handshake to complete and then
+ * proceed with logging secrets (for TLS 1.2 or older).
+ */
+ if(Curl_tls_keylog_enabled() && !octx->keylog_done)
+ ossl_log_tls12_secret(octx->ssl, &octx->keylog_done);
#endif
/* 1 is fine
@@ -4167,11 +4111,11 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
int detail = SSL_get_error(octx->ssl, err);
if(SSL_ERROR_WANT_READ == detail) {
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
}
if(SSL_ERROR_WANT_WRITE == detail) {
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
return CURLE_OK;
}
#ifdef SSL_ERROR_WANT_ASYNC
@@ -4198,7 +4142,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
int lib;
int reason;
- /* the connection failed, we're not waiting for anything else. */
+ /* the connection failed, we are not waiting for anything else. */
connssl->connecting_state = ssl_connect_2;
/* Get the earliest error code from the thread's error queue and remove
@@ -4259,7 +4203,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
/* detail is already set to the SSL error above */
- /* If we e.g. use SSLv2 request-method and the server doesn't like us
+ /* If we e.g. use SSLv2 request-method and the server does not like us
* (RST connection, etc.), OpenSSL gives no explanation whatsoever and
* the SO_ERROR is also lost.
*/
@@ -4285,7 +4229,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
int psigtype_nid = NID_undef;
const char *negotiated_group_name = NULL;
- /* we connected fine, we're not waiting for anything else. */
+ /* we connected fine, we are not waiting for anything else. */
connssl->connecting_state = ssl_connect_3;
#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
@@ -4398,7 +4342,7 @@ static CURLcode ossl_pkp_pin_peer_pubkey(struct Curl_easy *data, X509* cert,
/* Result is returned to caller */
CURLcode result = CURLE_SSL_PINNEDPUBKEYNOTMATCH;
- /* if a path wasn't specified, don't pin */
+ /* if a path was not specified, do not pin */
if(!pinnedpubkey)
return CURLE_OK;
@@ -4424,7 +4368,7 @@ static CURLcode ossl_pkp_pin_peer_pubkey(struct Curl_easy *data, X509* cert,
/*
* These checks are verifying we got back the same values as when we
- * sized the buffer. It's pretty weak since they should always be the
+ * sized the buffer. It is pretty weak since they should always be the
* same. But it gives us something to test.
*/
if((len1 != len2) || !temp || ((temp - buff1) != len1))
@@ -4551,7 +4495,7 @@ CURLcode Curl_oss_check_peer_cert(struct Curl_cfilter *cf,
if(!strict)
return CURLE_OK;
- failf(data, "SSL: couldn't get peer certificate");
+ failf(data, "SSL: could not get peer certificate");
return CURLE_PEER_FAILED_VERIFICATION;
}
@@ -4592,7 +4536,7 @@ CURLcode Curl_oss_check_peer_cert(struct Curl_cfilter *cf,
buffer, sizeof(buffer));
if(rc) {
if(strict)
- failf(data, "SSL: couldn't get X509-issuer name");
+ failf(data, "SSL: could not get X509-issuer name");
result = CURLE_PEER_FAILED_VERIFICATION;
}
else {
@@ -4695,8 +4639,8 @@ CURLcode Curl_oss_check_peer_cert(struct Curl_cfilter *cf,
#if (OPENSSL_VERSION_NUMBER >= 0x0090808fL) && !defined(OPENSSL_NO_TLSEXT) && \
!defined(OPENSSL_NO_OCSP)
if(conn_config->verifystatus && !octx->reused_session) {
- /* don't do this after Session ID reuse */
- result = verifystatus(cf, data);
+ /* do not do this after Session ID reuse */
+ result = verifystatus(cf, data, octx);
if(result) {
/* when verifystatus failed, remove the session id from the cache again
if present */
@@ -4721,7 +4665,7 @@ CURLcode Curl_oss_check_peer_cert(struct Curl_cfilter *cf,
#endif
if(!strict)
- /* when not strict, we don't bother about the verify cert problems */
+ /* when not strict, we do not bother about the verify cert problems */
result = CURLE_OK;
#ifndef CURL_DISABLE_PROXY
@@ -4754,7 +4698,7 @@ static CURLcode ossl_connect_step3(struct Curl_cfilter *cf,
/*
* We check certificates to authenticate the server; otherwise we risk
- * man-in-the-middle attack; NEVERTHELESS, if we're told explicitly not to
+ * man-in-the-middle attack; NEVERTHELESS, if we are told explicitly not to
* verify the peer, ignore faults and failures from the server cert
* operations.
*/
@@ -4783,7 +4727,7 @@ static CURLcode ossl_connect_common(struct Curl_cfilter *cf,
}
if(ssl_connect_1 == connssl->connecting_state) {
- /* Find out how much more time we're allowed */
+ /* Find out how much more time we are allowed */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -4797,9 +4741,7 @@ static CURLcode ossl_connect_common(struct Curl_cfilter *cf,
goto out;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
/* check allowed time left */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -4811,15 +4753,13 @@ static CURLcode ossl_connect_common(struct Curl_cfilter *cf,
goto out;
}
- /* if ssl is expecting something, check if it's available. */
- if(!nonblocking &&
- (connssl->connecting_state == ssl_connect_2_reading ||
- connssl->connecting_state == ssl_connect_2_writing)) {
+ /* if ssl is expecting something, check if it is available. */
+ if(!nonblocking && connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
timeout_ms);
@@ -4845,10 +4785,7 @@ static CURLcode ossl_connect_common(struct Curl_cfilter *cf,
* or epoll() will always have a valid fdset to wait on.
*/
result = ossl_connect_step2(cf, data);
- if(result || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
goto out;
} /* repeat step2 until all transactions are done. */
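
Across this change the old ssl_connect_2_reading/ssl_connect_2_writing sub-states collapse into a single ssl_connect_2 state plus the connssl->io_need bitmask, and the loop above derives the read and write sockets to wait on from that mask. A minimal standalone sketch of the same mapping using poll(); the flag macros and helper name below are invented for the illustration:

#include <poll.h>

/* Which direction(s) the TLS layer is currently waiting on. */
#define IO_NEED_NONE 0
#define IO_NEED_RECV (1 << 0)
#define IO_NEED_SEND (1 << 1)

/* Wait until the needed direction becomes ready or timeout_ms expires.
 * Returns poll()'s result: >0 ready, 0 timeout, <0 error. */
static int wait_for_io(int sockfd, unsigned int io_need, int timeout_ms)
{
  struct pollfd pfd;
  pfd.fd = sockfd;
  pfd.events = 0;
  pfd.revents = 0;
  if(io_need & IO_NEED_RECV)
    pfd.events |= POLLIN;
  if(io_need & IO_NEED_SEND)
    pfd.events |= POLLOUT;
  if(!pfd.events)
    return 1; /* nothing to wait for, proceed immediately */
  return poll(&pfd, 1, timeout_ms);
}
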
@@ -5031,7 +4968,7 @@ static ssize_t ossl_recv(struct Curl_cfilter *cf,
break;
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
- /* there's data pending, re-invoke SSL_read() */
+ /* there is data pending, re-invoke SSL_read() */
*curlcode = CURLE_AGAIN;
nread = -1;
goto out;
@@ -5065,7 +5002,7 @@ static ssize_t ossl_recv(struct Curl_cfilter *cf,
/* For debug builds be a little stricter and error on any
SSL_ERROR_SYSCALL. For example a server may have closed the connection
abruptly without a close_notify alert. For compatibility with older
- peers we don't do this by default. #4624
+ peers we do not do this by default. #4624
We can use this to gauge how many users may be affected, and
if it goes ok eventually transition to allow in dev and release with
@@ -5099,7 +5036,7 @@ static size_t ossl_version(char *buffer, size_t size)
#ifdef LIBRESSL_VERSION_NUMBER
#ifdef HAVE_OPENSSL_VERSION
char *p;
- int count;
+ size_t count;
const char *ver = OpenSSL_version(OPENSSL_VERSION);
const char expected[] = OSSL_PACKAGE " "; /* ie "LibreSSL " */
if(strncasecompare(ver, expected, sizeof(expected) - 1)) {
@@ -5181,14 +5118,14 @@ static CURLcode ossl_random(struct Curl_easy *data,
int rc;
if(data) {
if(ossl_seed(data)) /* Initiate the seed if not already done */
- return CURLE_FAILED_INIT; /* couldn't seed for some reason */
+ return CURLE_FAILED_INIT; /* could not seed for some reason */
}
else {
if(!rand_enough())
return CURLE_FAILED_INIT;
}
/* RAND_bytes() returns 1 on success, 0 otherwise. */
- rc = RAND_bytes(entropy, curlx_uztosi(length));
+ rc = RAND_bytes(entropy, (ossl_valsize_t)curlx_uztosi(length));
return (rc == 1 ? CURLE_OK : CURLE_FAILED_INIT);
}
@@ -5236,20 +5173,6 @@ static void *ossl_get_internals(struct ssl_connect_data *connssl,
(void *)octx->ssl_ctx : (void *)octx->ssl;
}
-static void ossl_free_multi_ssl_backend_data(
- struct multi_ssl_backend_data *mbackend)
-{
-#if defined(HAVE_SSL_X509_STORE_SHARE)
- if(mbackend->store) {
- X509_STORE_free(mbackend->store);
- }
- free(mbackend->CAfile);
- free(mbackend);
-#else /* HAVE_SSL_X509_STORE_SHARE */
- (void)mbackend;
-#endif /* HAVE_SSL_X509_STORE_SHARE */
-}
-
const struct Curl_ssl Curl_ssl_openssl = {
{ CURLSSLBACKEND_OPENSSL, "openssl" }, /* info */
@@ -5264,6 +5187,7 @@ const struct Curl_ssl Curl_ssl_openssl = {
#ifdef USE_ECH
SSLSUPP_ECH |
#endif
+ SSLSUPP_CA_CACHE |
SSLSUPP_HTTPS_PROXY,
sizeof(struct ossl_ctx),
@@ -5293,7 +5217,6 @@ const struct Curl_ssl Curl_ssl_openssl = {
#endif
NULL, /* use of data in this connection */
NULL, /* remote of data from this connection */
- ossl_free_multi_ssl_backend_data, /* free_multi_ssl_backend_data */
ossl_recv, /* recv decrypted data */
ossl_send, /* send data to encrypt */
};
diff --git a/libs/libcurl/src/vtls/openssl.h b/libs/libcurl/src/vtls/openssl.h
index f46fbf3722..46e75efbf1 100644
--- a/libs/libcurl/src/vtls/openssl.h
+++ b/libs/libcurl/src/vtls/openssl.h
@@ -45,8 +45,9 @@ struct ossl_ctx {
BIO_METHOD *bio_method;
CURLcode io_result; /* result of last BIO cfilter operation */
#ifndef HAVE_KEYLOG_CALLBACK
- /* Set to true once a valid keylog entry has been created to avoid dupes. */
- BIT(keylog_done);
+ /* Set to true once a valid keylog entry has been created to avoid dupes.
+ This is a bool and not a bitfield because it is passed by address. */
+ bool keylog_done;
#endif
BIT(x509_store_setup); /* x509 store has been set up */
BIT(reused_session); /* session-ID was reused for this */
diff --git a/libs/libcurl/src/vtls/rustls.c b/libs/libcurl/src/vtls/rustls.c
index 0e65c37f37..70592e6f76 100644
--- a/libs/libcurl/src/vtls/rustls.c
+++ b/libs/libcurl/src/vtls/rustls.c
@@ -48,6 +48,7 @@ struct rustls_ssl_backend_data
struct rustls_connection *conn;
size_t plain_out_buffered;
BIT(data_in_pending);
+ BIT(sent_shutdown);
};
/* For a given rustls_result error code, return the best-matching CURLcode. */
@@ -101,7 +102,7 @@ read_cb(void *userdata, uint8_t *buf, uintptr_t len, uintptr_t *out_n)
}
else if(nread == 0)
connssl->peer_closed = TRUE;
- *out_n = (int)nread;
+ *out_n = (uintptr_t)nread;
CURL_TRC_CF(io_ctx->data, io_ctx->cf, "cf->next recv(len=%zu) -> %zd, %d",
len, nread, result);
return ret;
@@ -122,7 +123,7 @@ write_cb(void *userdata, const uint8_t *buf, uintptr_t len, uintptr_t *out_n)
else
ret = EINVAL;
}
- *out_n = (int)nwritten;
+ *out_n = (uintptr_t)nwritten;
CURL_TRC_CF(io_ctx->data, io_ctx->cf, "cf->next send(len=%zu) -> %zd, %d",
len, nwritten, result);
return ret;
@@ -178,10 +179,10 @@ static ssize_t tls_recv_more(struct Curl_cfilter *cf,
* - Read out as many plaintext bytes from rustls as possible, until hitting
* error, EOF, or EAGAIN/EWOULDBLOCK, or plainbuf/plainlen is filled up.
*
- * It's okay to call this function with plainbuf == NULL and plainlen == 0.
- * In that case, it will copy bytes from the socket into rustls' TLS input
- * buffer, and process packets, but won't consume bytes from rustls' plaintext
- * output buffer.
+ * It is okay to call this function with plainbuf == NULL and plainlen == 0. In
+ * that case, it will copy bytes from the socket into rustls' TLS input
+ * buffer, and process packets, but will not consume bytes from rustls'
+ * plaintext output buffer.
*/
static ssize_t
cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
@@ -226,7 +227,7 @@ cr_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
goto out;
}
else if(rresult != RUSTLS_RESULT_OK) {
- /* n always equals 0 in this case, don't need to check it */
+ /* n always equals 0 in this case, do not need to check it */
char errorbuf[255];
size_t errorlen;
rustls_error(rresult, errorbuf, sizeof(errorbuf), &errorlen);
@@ -308,8 +309,8 @@ static CURLcode cr_flush_out(struct Curl_cfilter *cf, struct Curl_easy *data,
* - Fully drain rustls' plaintext output buffer into the socket until
* we get either an error or EAGAIN/EWOULDBLOCK.
*
- * It's okay to call this function with plainbuf == NULL and plainlen == 0.
- * In that case, it won't read anything into rustls' plaintext input buffer.
+ * It is okay to call this function with plainbuf == NULL and plainlen == 0.
+ * In that case, it will not read anything into rustls' plaintext input buffer.
* It will only drain rustls' plaintext output buffer into the socket.
*/
static ssize_t
@@ -438,7 +439,7 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data,
const char *hostname = connssl->peer.hostname;
char errorbuf[256];
size_t errorlen;
- int result;
+ rustls_result result;
DEBUGASSERT(backend);
rconn = backend->conn;
@@ -461,7 +462,7 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data,
if(!verifypeer) {
rustls_client_config_builder_dangerous_set_certificate_verifier(
config_builder, cr_verify_none);
- /* rustls doesn't support IP addresses (as of 0.19.0), and will reject
+ /* rustls does not support IP addresses (as of 0.19.0), and will reject
* connections created with an IP address, even when certificate
* verification is turned off. Set a placeholder hostname and disable
* SNI. */
@@ -474,7 +475,7 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data,
roots_builder = rustls_root_cert_store_builder_new();
if(ca_info_blob) {
- /* Enable strict parsing only if verification isn't disabled. */
+ /* Enable strict parsing only if verification is not disabled. */
result = rustls_root_cert_store_builder_add_pem(roots_builder,
ca_info_blob->data,
ca_info_blob->len,
@@ -488,7 +489,7 @@ cr_init_backend(struct Curl_cfilter *cf, struct Curl_easy *data,
}
}
else if(ssl_cafile) {
- /* Enable strict parsing only if verification isn't disabled. */
+ /* Enable strict parsing only if verification is not disabled. */
result = rustls_root_cert_store_builder_load_roots_from_file(
roots_builder, ssl_cafile, verifypeer);
if(result != RUSTLS_RESULT_OK) {
@@ -604,6 +605,7 @@ cr_connect_common(struct Curl_cfilter *cf,
* Connection has been established according to rustls. Set send/recv
* handlers, and update the state machine.
*/
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
if(!rustls_connection_is_handshaking(rconn)) {
infof(data, "Done handshaking");
/* rustls claims it is no longer handshaking *before* it has
@@ -613,7 +615,7 @@ cr_connect_common(struct Curl_cfilter *cf,
cr_set_negotiated_alpn(cf, data, rconn);
cr_send(cf, data, NULL, 0, &tmperr);
if(tmperr == CURLE_AGAIN) {
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
return CURLE_OK;
}
else if(tmperr != CURLE_OK) {
@@ -625,6 +627,7 @@ cr_connect_common(struct Curl_cfilter *cf,
return CURLE_OK;
}
+ connssl->connecting_state = ssl_connect_2;
wants_read = rustls_connection_wants_read(rconn);
wants_write = rustls_connection_wants_write(rconn) ||
backend->plain_out_buffered;
@@ -632,8 +635,6 @@ cr_connect_common(struct Curl_cfilter *cf,
writefd = wants_write?sockfd:CURL_SOCKET_BAD;
readfd = wants_read?sockfd:CURL_SOCKET_BAD;
- connssl->connecting_state = wants_write?
- ssl_connect_2_writing : ssl_connect_2_reading;
/* check allowed time left */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -661,6 +662,10 @@ cr_connect_common(struct Curl_cfilter *cf,
CURL_TRC_CF(data, cf, "Curl_socket_check: %s would block",
wants_read&&wants_write ? "writing and reading" :
wants_write ? "writing" : "reading");
+ if(wants_write)
+ connssl->io_need |= CURL_SSL_IO_NEED_SEND;
+ if(wants_read)
+ connssl->io_need |= CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
}
/* socket is readable or writable */
@@ -695,7 +700,7 @@ cr_connect_common(struct Curl_cfilter *cf,
}
/* We should never fall through the loop. We should return either because
- the handshake is done or because we can't read/write without blocking. */
+ the handshake is done or because we cannot read/write without blocking. */
DEBUGASSERT(false);
}
@@ -723,24 +728,85 @@ cr_get_internals(struct ssl_connect_data *connssl,
return &backend->conn;
}
-static void
-cr_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+static CURLcode
+cr_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
struct ssl_connect_data *connssl = cf->ctx;
struct rustls_ssl_backend_data *backend =
(struct rustls_ssl_backend_data *)connssl->backend;
- CURLcode tmperr = CURLE_OK;
- ssize_t n = 0;
+ CURLcode result = CURLE_OK;
+ ssize_t nwritten, nread;
+ char buf[1024];
+ size_t i;
DEBUGASSERT(backend);
- if(backend->conn && !connssl->peer_closed) {
- CURL_TRC_CF(data, cf, "closing connection, send notify");
- rustls_connection_send_close_notify(backend->conn);
- n = cr_send(cf, data, NULL, 0, &tmperr);
- if(n < 0) {
- failf(data, "rustls: error sending close_notify: %d", tmperr);
+ if(!backend->conn || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
+
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+
+ if(!backend->sent_shutdown) {
+ /* do this only once */
+ backend->sent_shutdown = TRUE;
+ if(send_shutdown) {
+ rustls_connection_send_close_notify(backend->conn);
}
+ }
+
+ nwritten = cr_send(cf, data, NULL, 0, &result);
+ if(nwritten < 0) {
+ if(result == CURLE_AGAIN) {
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ result = CURLE_OK;
+ goto out;
+ }
+ DEBUGASSERT(result);
+ CURL_TRC_CF(data, cf, "shutdown send failed: %d", result);
+ goto out;
+ }
+
+ for(i = 0; i < 10; ++i) {
+ nread = cr_recv(cf, data, buf, (int)sizeof(buf), &result);
+ if(nread <= 0)
+ break;
+ }
+ if(nread > 0) {
+ /* still data coming in? */
+ }
+ else if(nread == 0) {
+ /* We got the close notify alert and are done. */
+ *done = TRUE;
+ }
+ else if(result == CURLE_AGAIN) {
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ result = CURLE_OK;
+ }
+ else {
+ DEBUGASSERT(result);
+ CURL_TRC_CF(data, cf, "shutdown, error: %d", result);
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
+}
+
+static void
+cr_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct rustls_ssl_backend_data *backend =
+ (struct rustls_ssl_backend_data *)connssl->backend;
+
+ (void)data;
+ DEBUGASSERT(backend);
+ if(backend->conn) {
rustls_connection_free(backend->conn);
backend->conn = NULL;
}
@@ -766,7 +832,7 @@ const struct Curl_ssl Curl_ssl_rustls = {
Curl_none_cleanup, /* cleanup */
cr_version, /* version */
Curl_none_check_cxn, /* check_cxn */
- Curl_none_shutdown, /* shutdown */
+ cr_shutdown, /* shutdown */
cr_data_pending, /* data_pending */
Curl_none_random, /* random */
Curl_none_cert_status_request, /* cert_status_request */
@@ -783,7 +849,6 @@ const struct Curl_ssl Curl_ssl_rustls = {
NULL, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
cr_recv, /* recv decrypted data */
cr_send, /* send data to encrypt */
};
diff --git a/libs/libcurl/src/vtls/schannel.c b/libs/libcurl/src/vtls/schannel.c
index d172b567c4..6fef076099 100644
--- a/libs/libcurl/src/vtls/schannel.c
+++ b/libs/libcurl/src/vtls/schannel.c
@@ -34,7 +34,7 @@
#ifdef USE_SCHANNEL
#ifndef USE_WINDOWS_SSPI
-# error "Can't compile SCHANNEL support without SSPI."
+# error "cannot compile SCHANNEL support without SSPI."
#endif
#include "schannel.h"
@@ -171,7 +171,7 @@ schannel_set_ssl_version_min_max(DWORD *enabled_protocols,
{
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
long ssl_version = conn_config->version;
- long ssl_version_max = conn_config->version_max;
+ long ssl_version_max = (long)conn_config->version_max;
long i = ssl_version;
switch(ssl_version_max) {
@@ -364,7 +364,7 @@ set_ssl_ciphers(SCHANNEL_CRED *schannel_cred, char *ciphers,
if(!alg)
alg = get_alg_id_by_name(startCur);
if(alg)
- algIds[algCount++] = alg;
+ algIds[algCount++] = (ALG_ID)alg;
else if(!strncmp(startCur, "USE_STRONG_CRYPTO",
sizeof("USE_STRONG_CRYPTO") - 1) ||
!strncmp(startCur, "SCH_USE_STRONG_CRYPTO",
@@ -377,7 +377,7 @@ set_ssl_ciphers(SCHANNEL_CRED *schannel_cred, char *ciphers,
startCur++;
}
schannel_cred->palgSupportedAlgs = algIds;
- schannel_cred->cSupportedAlgs = algCount;
+ schannel_cred->cSupportedAlgs = (DWORD)algCount;
return CURLE_OK;
}
@@ -513,7 +513,7 @@ schannel_acquire_credential_handle(struct Curl_cfilter *cf,
}
if(!ssl_config->auto_client_cert) {
- flags &= ~SCH_CRED_USE_DEFAULT_CREDS;
+ flags &= ~(DWORD)SCH_CRED_USE_DEFAULT_CREDS;
flags |= SCH_CRED_NO_DEFAULT_CREDS;
infof(data, "schannel: disabled automatic use of client certificate");
}
@@ -950,7 +950,7 @@ schannel_acquire_credential_handle(struct Curl_cfilter *cf,
tls_parameters.pDisabledCrypto = crypto_settings;
/* The number of blocked suites */
- tls_parameters.cDisabledCrypto = crypto_settings_idx;
+ tls_parameters.cDisabledCrypto = (DWORD)crypto_settings_idx;
credentials.pTlsParameters = &tls_parameters;
credentials.cTlsParameters = 1;
@@ -976,7 +976,7 @@ schannel_acquire_credential_handle(struct Curl_cfilter *cf,
}
else {
/* Pre-Windows 10 1809 or the user set a legacy algorithm list. Although MS
- doesn't document it, currently Schannel will not negotiate TLS 1.3 when
+ does not document it, currently Schannel will not negotiate TLS 1.3 when
SCHANNEL_CRED is used. */
ALG_ID algIds[NUM_CIPHERS];
char *ciphers = conn_config->cipher_list;
@@ -1083,7 +1083,7 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
#ifdef HAS_ALPN
/* ALPN is only supported on Windows 8.1 / Server 2012 R2 and above.
- Also it doesn't seem to be supported for Wine, see curl bug #983. */
+ Also it does not seem to be supported for Wine, see curl bug #983. */
backend->use_alpn = connssl->alpn &&
!GetProcAddress(GetModuleHandle(TEXT("ntdll")),
"wine_get_version") &&
@@ -1095,7 +1095,7 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
#ifdef _WIN32_WCE
#ifdef HAS_MANUAL_VERIFY_API
- /* certificate validation on CE doesn't seem to work right; we'll
+ /* certificate validation on CE does not seem to work right; we will
* do it following a more manual process. */
backend->use_manual_cred_validation = true;
#else
@@ -1127,7 +1127,7 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
backend->cred = NULL;
/* check for an existing reusable credential handle */
- if(ssl_config->primary.sessionid) {
+ if(ssl_config->primary.cache_session) {
Curl_ssl_sessionid_lock(data);
if(!Curl_ssl_getsessionid(cf, data, &connssl->peer,
(void **)&old_cred, NULL)) {
@@ -1241,7 +1241,7 @@ schannel_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
/* Schannel InitializeSecurityContext:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa375924.aspx
- At the moment we don't pass inbuf unless we're using ALPN since we only
+ At the moment we do not pass inbuf unless we are using ALPN since we only
use it for that, and Wine (for which we currently disable ALPN) is giving
us problems with inbuf regardless. https://github.com/curl/curl/issues/983
*/
@@ -1332,7 +1332,8 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
DEBUGASSERT(backend);
- doread = (connssl->connecting_state != ssl_connect_2_writing) ? TRUE : FALSE;
+ doread = (connssl->io_need & CURL_SSL_IO_NEED_SEND)? FALSE : TRUE;
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
DEBUGF(infof(data,
"schannel: SSL/TLS connection with %s port %d (step 2/3)",
@@ -1393,8 +1394,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
backend->encdata_offset,
&result);
if(result == CURLE_AGAIN) {
- if(connssl->connecting_state != ssl_connect_2_writing)
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
DEBUGF(infof(data, "schannel: failed to receive handshake, "
"need more data"));
return CURLE_OK;
@@ -1448,7 +1448,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
/* check if the handshake was incomplete */
if(sspi_status == SEC_E_INCOMPLETE_MESSAGE) {
backend->encdata_is_incomplete = true;
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
DEBUGF(infof(data,
"schannel: received incomplete message, need more data"));
return CURLE_OK;
@@ -1460,7 +1460,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
if(sspi_status == SEC_I_INCOMPLETE_CREDENTIALS &&
!(backend->req_flags & ISC_REQ_USE_SUPPLIED_CREDS)) {
backend->req_flags |= ISC_REQ_USE_SUPPLIED_CREDS;
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
DEBUGF(infof(data,
"schannel: a client certificate has been requested"));
return CURLE_OK;
@@ -1531,7 +1531,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
inbuf[1].cbBuffer));
/*
There are two cases where we could be getting extra data here:
- 1) If we're renegotiating a connection and the handshake is already
+ 1) If we are renegotiating a connection and the handshake is already
complete (from the server perspective), there can be encrypted app data
(not handshake data) in an extra buffer at this point.
2) (sspi_status == SEC_I_CONTINUE_NEEDED) We are negotiating a
@@ -1560,7 +1560,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
/* check if the handshake needs to be continued */
if(sspi_status == SEC_I_CONTINUE_NEEDED) {
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
}
@@ -1593,7 +1593,7 @@ schannel_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
#endif
/* Verify the hostname manually when certificate verification is disabled,
- because in that case Schannel won't verify it. */
+ because in that case Schannel will not verify it. */
if(!conn_config->verifypeer && conn_config->verifyhost)
return Curl_verify_host(cf, data);
@@ -1772,34 +1772,16 @@ schannel_connect_step3(struct Curl_cfilter *cf, struct Curl_easy *data)
#endif
/* save the current session data for possible reuse */
- if(ssl_config->primary.sessionid) {
- bool incache;
- struct Curl_schannel_cred *old_cred = NULL;
-
+ if(ssl_config->primary.cache_session) {
Curl_ssl_sessionid_lock(data);
- incache = !(Curl_ssl_getsessionid(cf, data, &connssl->peer,
- (void **)&old_cred, NULL));
- if(incache) {
- if(old_cred != backend->cred) {
- DEBUGF(infof(data,
- "schannel: old credential handle is stale, removing"));
- /* we're not taking old_cred ownership here, no refcount++ is needed */
- Curl_ssl_delsessionid(data, (void *)old_cred);
- incache = FALSE;
- }
- }
- if(!incache) {
- /* Up ref count since call takes ownership */
- backend->cred->refcount++;
- result = Curl_ssl_addsessionid(cf, data, &connssl->peer, backend->cred,
- sizeof(struct Curl_schannel_cred),
- schannel_session_free);
- if(result) {
- Curl_ssl_sessionid_unlock(data);
- return result;
- }
- }
+ /* Up ref count since call takes ownership */
+ backend->cred->refcount++;
+ result = Curl_ssl_set_sessionid(cf, data, &connssl->peer, backend->cred,
+ sizeof(struct Curl_schannel_cred),
+ schannel_session_free);
Curl_ssl_sessionid_unlock(data);
+ if(result)
+ return result;
}
if(data->set.ssl.certinfo) {
@@ -1853,7 +1835,7 @@ schannel_connect_common(struct Curl_cfilter *cf,
}
if(ssl_connect_1 == connssl->connecting_state) {
- /* check out how much more time we're allowed */
+ /* check out how much more time we are allowed */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -1867,11 +1849,9 @@ schannel_connect_common(struct Curl_cfilter *cf,
return result;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
- /* check out how much more time we're allowed */
+ /* check out how much more time we are allowed */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -1880,14 +1860,13 @@ schannel_connect_common(struct Curl_cfilter *cf,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(connssl->connecting_state == ssl_connect_2_reading
- || connssl->connecting_state == ssl_connect_2_writing) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state ? sockfd : CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state ? sockfd : CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd : CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd : CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking ? 0 : timeout_ms);
@@ -1918,10 +1897,7 @@ schannel_connect_common(struct Curl_cfilter *cf,
* have a valid fdset to wait on.
*/
result = schannel_connect_step2(cf, data);
- if(result || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
return result;
} /* repeat step2 until all transactions are done. */
@@ -2029,10 +2005,10 @@ schannel_send(struct Curl_cfilter *cf, struct Curl_easy *data,
len = outbuf[0].cbBuffer + outbuf[1].cbBuffer + outbuf[2].cbBuffer;
/*
- It's important to send the full message which includes the header,
- encrypted payload, and trailer. Until the client receives all the
+ it is important to send the full message which includes the header,
+ encrypted payload, and trailer. Until the client receives all the
data a coherent message has not been delivered and the client
- can't read any of it.
+ cannot read any of it.
If we wanted to buffer the unwritten encrypted bytes, we would
tell the client that all data it has requested to be sent has been
@@ -2129,8 +2105,9 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
DEBUGASSERT(backend);
/****************************************************************************
- * Don't return or set backend->recv_unrecoverable_err unless in the cleanup.
- * The pattern for return error is set *err, optional infof, goto cleanup.
+ * Do not return or set backend->recv_unrecoverable_err unless in the
+ * cleanup. The pattern for return error is set *err, optional infof, goto
+ * cleanup.
*
* Our priority is to always return as much decrypted data to the caller as
* possible, even if an error occurs. The state of the decrypted buffer must
@@ -2155,7 +2132,7 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
infof(data, "schannel: server indicated shutdown in a prior call");
goto cleanup;
}
- /* It's debatable what to return when !len. Regardless we can't return
+ /* it is debatable what to return when !len. Regardless we cannot return
immediately because there may be data to decrypt (in the case we want to
decrypt all encrypted cached data) so handle !len later in cleanup.
*/
@@ -2313,14 +2290,15 @@ schannel_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
if(sspi_status == SEC_I_RENEGOTIATE) {
infof(data, "schannel: remote party requests renegotiation");
if(*err && *err != CURLE_AGAIN) {
- infof(data, "schannel: can't renegotiate, an error is pending");
+ infof(data, "schannel: cannot renegotiate, an error is pending");
goto cleanup;
}
/* begin renegotiation */
infof(data, "schannel: renegotiating SSL/TLS connection");
connssl->state = ssl_connection_negotiating;
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->connecting_state = ssl_connect_2;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
backend->recv_renegotiating = true;
*err = schannel_connect_common(cf, data, FALSE, &done);
backend->recv_renegotiating = false;
@@ -2377,13 +2355,13 @@ cleanup:
/* Error if the connection has closed without a close_notify.
- The behavior here is a matter of debate. We don't want to be vulnerable
- to a truncation attack however there's some browser precedent for
+ The behavior here is a matter of debate. We do not want to be vulnerable
+ to a truncation attack however there is some browser precedent for
ignoring the close_notify for compatibility reasons.
Additionally, Windows 2000 (v5.0) is a special case since it seems it
- doesn't return close_notify. In that case if the connection was closed we
- assume it was graceful (close_notify) since there doesn't seem to be a
+ does not return close_notify. In that case if the connection was closed we
+ assume it was graceful (close_notify) since there does not seem to be a
way to tell.
*/
if(len && !backend->decdata_offset && backend->recv_connection_closed &&
@@ -2420,7 +2398,7 @@ cleanup:
if(!*err && !backend->recv_connection_closed)
*err = CURLE_AGAIN;
- /* It's debatable what to return when !len. We could return whatever error
+ /* it is debatable what to return when !len. We could return whatever error
we got from decryption but instead we override here so the return is
consistent.
*/
@@ -2475,8 +2453,9 @@ static bool schannel_data_pending(struct Curl_cfilter *cf,
/* shut down the SSL connection and clean up related memory.
this function can be called multiple times on the same connection including
if the SSL connection failed (eg connection made but failed handshake). */
-static int schannel_shutdown(struct Curl_cfilter *cf,
- struct Curl_easy *data)
+static CURLcode schannel_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
{
/* See https://msdn.microsoft.com/en-us/library/windows/desktop/aa380138.aspx
* Shutting Down an Schannel Connection
@@ -2484,22 +2463,36 @@ static int schannel_shutdown(struct Curl_cfilter *cf,
struct ssl_connect_data *connssl = cf->ctx;
struct schannel_ssl_backend_data *backend =
(struct schannel_ssl_backend_data *)connssl->backend;
+ CURLcode result = CURLE_OK;
+
+ if(cf->shutdown) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
DEBUGASSERT(data);
DEBUGASSERT(backend);
+ /* Not supported in schannel */
+ (void)send_shutdown;
+
+ *done = FALSE;
if(backend->ctxt) {
infof(data, "schannel: shutting down SSL/TLS connection with %s port %d",
connssl->peer.hostname, connssl->peer.port);
}
- if(backend->cred && backend->ctxt) {
+ if(!backend->ctxt || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
+
+ if(backend->cred && backend->ctxt && !backend->sent_shutdown) {
SecBufferDesc BuffDesc;
SecBuffer Buffer;
SECURITY_STATUS sspi_status;
SecBuffer outbuf;
SecBufferDesc outbuf_desc;
- CURLcode result;
DWORD dwshut = SCHANNEL_SHUTDOWN;
InitSecBuffer(&Buffer, SECBUFFER_TOKEN, &dwshut, sizeof(dwshut));
@@ -2512,6 +2505,8 @@ static int schannel_shutdown(struct Curl_cfilter *cf,
char buffer[STRERROR_LEN];
failf(data, "schannel: ApplyControlToken failure: %s",
Curl_sspi_strerror(sspi_status, buffer, sizeof(buffer)));
+ result = CURLE_SEND_ERROR;
+ goto out;
}
/* setup output buffer */
@@ -2538,13 +2533,75 @@ static int schannel_shutdown(struct Curl_cfilter *cf,
outbuf.pvBuffer, outbuf.cbBuffer,
&result);
s_pSecFn->FreeContextBuffer(outbuf.pvBuffer);
- if((result != CURLE_OK) || (outbuf.cbBuffer != (size_t) written)) {
- infof(data, "schannel: failed to send close msg: %s"
- " (bytes written: %zd)", curl_easy_strerror(result), written);
+ if(!result) {
+ if(written < (ssize_t)outbuf.cbBuffer) {
+ /* TODO: handle partial sends */
+ infof(data, "schannel: failed to send close msg: %s"
+ " (bytes written: %zd)", curl_easy_strerror(result), written);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ backend->sent_shutdown = TRUE;
+ *done = TRUE;
+ }
+ else if(result == CURLE_AGAIN) {
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ result = CURLE_OK;
+ goto out;
+ }
+ else {
+ if(!backend->recv_connection_closed) {
+ infof(data, "schannel: error sending close msg: %d", result);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ /* Looks like server already closed the connection.
+ * An error to send our close notify is not a failure. */
+ *done = TRUE;
+ result = CURLE_OK;
}
}
}
+ /* If the connection seems open and we have not seen the close notify
+ * from the server yet, try to receive it. */
+ if(backend->cred && backend->ctxt &&
+ !backend->recv_sspi_close_notify && !backend->recv_connection_closed) {
+ char buffer[1024];
+ ssize_t nread;
+
+ nread = schannel_recv(cf, data, buffer, sizeof(buffer), &result);
+ if(nread > 0) {
+ /* still data coming in? */
+ }
+ else if(nread == 0) {
+ /* We got the close notify alert and are done. */
+ backend->recv_connection_closed = TRUE;
+ *done = TRUE;
+ }
+ else if(nread < 0 && result == CURLE_AGAIN) {
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ }
+ else {
+ CURL_TRC_CF(data, cf, "SSL shutdown, error %d", result);
+ result = CURLE_RECV_ERROR;
+ }
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
+}
+
+static void schannel_close(struct Curl_cfilter *cf, struct Curl_easy *data)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct schannel_ssl_backend_data *backend =
+ (struct schannel_ssl_backend_data *)connssl->backend;
+
+ DEBUGASSERT(data);
+ DEBUGASSERT(backend);
+
/* free SSPI Schannel API security context handle */
if(backend->ctxt) {
DEBUGF(infof(data, "schannel: clear security context handle"));
@@ -2574,13 +2631,6 @@ static int schannel_shutdown(struct Curl_cfilter *cf,
backend->decdata_length = 0;
backend->decdata_offset = 0;
}
-
- return CURLE_OK;
-}
-
-static void schannel_close(struct Curl_cfilter *cf, struct Curl_easy *data)
-{
- schannel_shutdown(cf, data);
}
static int schannel_init(void)
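The shutdown hunks above move schannel_shutdown() to the newer vtls contract: it returns a CURLcode, reports completion through *done, and records the direction it is waiting on in connssl->io_need (send the close_notify first, then try to receive the peer's). A hedged sketch of how a caller might drive it, using Curl_socket_check with the argument order shown earlier in this diff (the driver function name is illustrative):

/* Illustrative only: repeat the non-blocking shutdown until it reports done,
 * waiting on the socket according to connssl->io_need between attempts. */
static CURLcode drive_shutdown(struct Curl_cfilter *cf, struct Curl_easy *data,
                               curl_socket_t sockfd, timediff_t timeout_ms)
{
  struct ssl_connect_data *connssl = cf->ctx;
  bool done = FALSE;
  CURLcode result = CURLE_OK;

  while(!done) {
    result = schannel_shutdown(cf, data, TRUE, &done);
    if(result || done)
      break;
    /* wait for the direction the filter said it needs before retrying */
    if(Curl_socket_check(
         (connssl->io_need & CURL_SSL_IO_NEED_RECV) ? sockfd : CURL_SOCKET_BAD,
         CURL_SOCKET_BAD,
         (connssl->io_need & CURL_SSL_IO_NEED_SEND) ? sockfd : CURL_SOCKET_BAD,
         timeout_ms) < 0)
      return CURLE_RECV_ERROR; /* select/poll error; handling is illustrative */
  }
  return result;
}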
@@ -2595,9 +2645,7 @@ static void schannel_cleanup(void)
static size_t schannel_version(char *buffer, size_t size)
{
- size = msnprintf(buffer, size, "Schannel");
-
- return size;
+ return msnprintf(buffer, size, "Schannel");
}
static CURLcode schannel_random(struct Curl_easy *data UNUSED_PARAM,
@@ -2622,7 +2670,7 @@ static CURLcode schannel_pkp_pin_peer_pubkey(struct Curl_cfilter *cf,
DEBUGASSERT(backend);
- /* if a path wasn't specified, don't pin */
+ /* if a path was not specified, do not pin */
if(!pinnedpubkey)
return CURLE_OK;
@@ -2684,6 +2732,13 @@ static void schannel_checksum(const unsigned char *input,
DWORD provType,
const unsigned int algId)
{
+#ifdef CURL_WINDOWS_APP
+ (void)input;
+ (void)inputlen;
+ (void)provType;
+ (void)algId;
+ memset(checksum, 0, checksumlen);
+#else
HCRYPTPROV hProv = 0;
HCRYPTHASH hHash = 0;
DWORD cbHashSize = 0;
@@ -2724,6 +2779,7 @@ static void schannel_checksum(const unsigned char *input,
if(hProv)
CryptReleaseContext(hProv, 0);
+#endif
}
static CURLcode schannel_sha256sum(const unsigned char *input,
@@ -2752,7 +2808,7 @@ HCERTSTORE Curl_schannel_get_cached_cert_store(struct Curl_cfilter *cf,
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
struct Curl_multi *multi = data->multi;
const struct curl_blob *ca_info_blob = conn_config->ca_info_blob;
- struct schannel_multi_ssl_backend_data *mbackend;
+ struct schannel_cert_share *share;
const struct ssl_general_config *cfg = &data->set.general_ssl;
timediff_t timeout_ms;
timediff_t elapsed_ms;
@@ -2761,12 +2817,14 @@ HCERTSTORE Curl_schannel_get_cached_cert_store(struct Curl_cfilter *cf,
DEBUGASSERT(multi);
- if(!multi || !multi->ssl_backend_data) {
+ if(!multi) {
return NULL;
}
- mbackend = (struct schannel_multi_ssl_backend_data *)multi->ssl_backend_data;
- if(!mbackend->cert_store) {
+ share = Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_SCHANNEL_CERT_SHARE_KEY,
+ sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY)-1);
+ if(!share || !share->cert_store) {
return NULL;
}
@@ -2781,37 +2839,52 @@ HCERTSTORE Curl_schannel_get_cached_cert_store(struct Curl_cfilter *cf,
timeout_ms = cfg->ca_cache_timeout * (timediff_t)1000;
if(timeout_ms >= 0) {
now = Curl_now();
- elapsed_ms = Curl_timediff(now, mbackend->time);
+ elapsed_ms = Curl_timediff(now, share->time);
if(elapsed_ms >= timeout_ms) {
return NULL;
}
}
if(ca_info_blob) {
- if(!mbackend->CAinfo_blob_digest) {
+ if(!share->CAinfo_blob_digest) {
return NULL;
}
- if(mbackend->CAinfo_blob_size != ca_info_blob->len) {
+ if(share->CAinfo_blob_size != ca_info_blob->len) {
return NULL;
}
schannel_sha256sum((const unsigned char *)ca_info_blob->data,
ca_info_blob->len,
info_blob_digest,
CURL_SHA256_DIGEST_LENGTH);
- if(memcmp(mbackend->CAinfo_blob_digest,
+ if(memcmp(share->CAinfo_blob_digest,
info_blob_digest,
CURL_SHA256_DIGEST_LENGTH)) {
return NULL;
}
}
else {
- if(!conn_config->CAfile || !mbackend->CAfile ||
- strcmp(mbackend->CAfile, conn_config->CAfile)) {
+ if(!conn_config->CAfile || !share->CAfile ||
+ strcmp(share->CAfile, conn_config->CAfile)) {
return NULL;
}
}
- return mbackend->cert_store;
+ return share->cert_store;
+}
+
+static void schannel_cert_share_free(void *key, size_t key_len, void *p)
+{
+ struct schannel_cert_share *share = p;
+ DEBUGASSERT(key_len == (sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY)-1));
+ DEBUGASSERT(!memcmp(MPROTO_SCHANNEL_CERT_SHARE_KEY, key, key_len));
+ (void)key;
+ (void)key_len;
+ if(share->cert_store) {
+ CertCloseStore(share->cert_store, 0);
+ }
+ free(share->CAinfo_blob_digest);
+ free(share->CAfile);
+ free(share);
}
bool Curl_schannel_set_cached_cert_store(struct Curl_cfilter *cf,
@@ -2821,7 +2894,7 @@ bool Curl_schannel_set_cached_cert_store(struct Curl_cfilter *cf,
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
struct Curl_multi *multi = data->multi;
const struct curl_blob *ca_info_blob = conn_config->ca_info_blob;
- struct schannel_multi_ssl_backend_data *mbackend;
+ struct schannel_cert_share *share;
unsigned char *CAinfo_blob_digest = NULL;
size_t CAinfo_blob_size = 0;
char *CAfile = NULL;
@@ -2832,17 +2905,23 @@ bool Curl_schannel_set_cached_cert_store(struct Curl_cfilter *cf,
return false;
}
- if(!multi->ssl_backend_data) {
- multi->ssl_backend_data =
- calloc(1, sizeof(struct schannel_multi_ssl_backend_data));
- if(!multi->ssl_backend_data) {
+ share = Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_SCHANNEL_CERT_SHARE_KEY,
+ sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY)-1);
+ if(!share) {
+ share = calloc(1, sizeof(*share));
+ if(!share) {
+ return false;
+ }
+ if(!Curl_hash_add2(&multi->proto_hash,
+ (void *)MPROTO_SCHANNEL_CERT_SHARE_KEY,
+ sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY)-1,
+ share, schannel_cert_share_free)) {
+ free(share);
return false;
}
}
- mbackend = (struct schannel_multi_ssl_backend_data *)multi->ssl_backend_data;
-
-
if(ca_info_blob) {
CAinfo_blob_digest = malloc(CURL_SHA256_DIGEST_LENGTH);
if(!CAinfo_blob_digest) {
@@ -2864,33 +2943,20 @@ bool Curl_schannel_set_cached_cert_store(struct Curl_cfilter *cf,
}
/* free old cache data */
- if(mbackend->cert_store) {
- CertCloseStore(mbackend->cert_store, 0);
+ if(share->cert_store) {
+ CertCloseStore(share->cert_store, 0);
}
- free(mbackend->CAinfo_blob_digest);
- free(mbackend->CAfile);
+ free(share->CAinfo_blob_digest);
+ free(share->CAfile);
- mbackend->time = Curl_now();
- mbackend->cert_store = cert_store;
- mbackend->CAinfo_blob_digest = CAinfo_blob_digest;
- mbackend->CAinfo_blob_size = CAinfo_blob_size;
- mbackend->CAfile = CAfile;
+ share->time = Curl_now();
+ share->cert_store = cert_store;
+ share->CAinfo_blob_digest = CAinfo_blob_digest;
+ share->CAinfo_blob_size = CAinfo_blob_size;
+ share->CAfile = CAfile;
return true;
}
-static void schannel_free_multi_ssl_backend_data(
- struct multi_ssl_backend_data *msbd)
-{
- struct schannel_multi_ssl_backend_data *mbackend =
- (struct schannel_multi_ssl_backend_data*)msbd;
- if(mbackend->cert_store) {
- CertCloseStore(mbackend->cert_store, 0);
- }
- free(mbackend->CAinfo_blob_digest);
- free(mbackend->CAfile);
- free(mbackend);
-}
-
const struct Curl_ssl Curl_ssl_schannel = {
{ CURLSSLBACKEND_SCHANNEL, "schannel" }, /* info */
@@ -2898,8 +2964,11 @@ const struct Curl_ssl Curl_ssl_schannel = {
#ifdef HAS_MANUAL_VERIFY_API
SSLSUPP_CAINFO_BLOB |
#endif
+#ifndef CURL_WINDOWS_APP
SSLSUPP_PINNEDPUBKEY |
+#endif
SSLSUPP_TLS13_CIPHERSUITES |
+ SSLSUPP_CA_CACHE |
SSLSUPP_HTTPS_PROXY,
sizeof(struct schannel_ssl_backend_data),
@@ -2925,7 +2994,6 @@ const struct Curl_ssl Curl_ssl_schannel = {
schannel_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- schannel_free_multi_ssl_backend_data, /* free_multi_ssl_backend_data */
schannel_recv, /* recv decrypted data */
schannel_send, /* send data to encrypt */
};
diff --git a/libs/libcurl/src/vtls/schannel_int.h b/libs/libcurl/src/vtls/schannel_int.h
index 92fbc073b0..77105f81ab 100644
--- a/libs/libcurl/src/vtls/schannel_int.h
+++ b/libs/libcurl/src/vtls/schannel_int.h
@@ -28,7 +28,8 @@
#ifdef USE_SCHANNEL
-#if defined(__MINGW32__) || defined(CERT_CHAIN_REVOCATION_CHECK_CHAIN)
+#if (defined(__MINGW32__) || defined(CERT_CHAIN_REVOCATION_CHECK_CHAIN)) \
+ && !defined(CURL_WINDOWS_APP)
#define HAS_MANUAL_VERIFY_API
#endif
@@ -144,7 +145,7 @@ struct schannel_ssl_backend_data {
size_t encdata_offset, decdata_offset;
unsigned char *encdata_buffer, *decdata_buffer;
/* encdata_is_incomplete: if encdata contains only a partial record that
- can't be decrypted without another recv() (that is, status is
+ cannot be decrypted without another recv() (that is, status is
SEC_E_INCOMPLETE_MESSAGE) then set this true. after an recv() adds
more bytes into encdata then set this back to false. */
bool encdata_is_incomplete;
@@ -157,9 +158,13 @@ struct schannel_ssl_backend_data {
#ifdef HAS_MANUAL_VERIFY_API
bool use_manual_cred_validation; /* true if manual cred validation is used */
#endif
+ BIT(sent_shutdown);
};
-struct schannel_multi_ssl_backend_data {
+/* key to use at `multi->proto_hash` */
+#define MPROTO_SCHANNEL_CERT_SHARE_KEY "tls:schannel:cert:share"
+
+struct schannel_cert_share {
unsigned char *CAinfo_blob_digest; /* CA info blob digest */
size_t CAinfo_blob_size; /* CA info blob size */
char *CAfile; /* CAfile path used to generate
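Both cert-store functions in the schannel.c hunks above now keep their shared state in multi->proto_hash under the MPROTO_SCHANNEL_CERT_SHARE_KEY defined here, with schannel_cert_share_free registered as the destructor, instead of the removed multi->ssl_backend_data slot. A hedged sketch of the pick-or-create pattern, using the Curl_hash_pick/Curl_hash_add2 calls exactly as they appear in this diff (the helper name is hypothetical):

/* Hedged sketch: look up the shared cert-store entry in the multi handle's
 * proto_hash, creating and registering it on first use. */
static struct schannel_cert_share *get_cert_share(struct Curl_multi *multi)
{
  struct schannel_cert_share *share =
    Curl_hash_pick(&multi->proto_hash,
                   (void *)MPROTO_SCHANNEL_CERT_SHARE_KEY,
                   sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY) - 1);
  if(!share) {
    share = calloc(1, sizeof(*share));
    if(share &&
       !Curl_hash_add2(&multi->proto_hash,
                       (void *)MPROTO_SCHANNEL_CERT_SHARE_KEY,
                       sizeof(MPROTO_SCHANNEL_CERT_SHARE_KEY) - 1,
                       share, schannel_cert_share_free)) {
      free(share); /* destructor is only installed when the add succeeds */
      share = NULL;
    }
  }
  return share;
}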
diff --git a/libs/libcurl/src/vtls/schannel_verify.c b/libs/libcurl/src/vtls/schannel_verify.c
index 743f3b059e..00b3b4d7d3 100644
--- a/libs/libcurl/src/vtls/schannel_verify.c
+++ b/libs/libcurl/src/vtls/schannel_verify.c
@@ -33,7 +33,7 @@
#ifdef USE_SCHANNEL
#ifndef USE_WINDOWS_SSPI
-# error "Can't compile SCHANNEL support without SSPI."
+# error "cannot compile SCHANNEL support without SSPI."
#endif
#include "schannel.h"
@@ -82,7 +82,7 @@ static int is_cr_or_lf(char c)
}
/* Search the substring needle,needlelen into string haystack,haystacklen
- * Strings don't need to be terminated by a '\0'.
+ * Strings do not need to be terminated by a '\0'.
* Similar to OSX/Linux memmem (not available on Visual Studio).
* Return position of beginning of first occurrence or NULL if not found
*/
@@ -335,7 +335,7 @@ cleanup:
/*
* Returns the number of characters necessary to populate all the host_names.
- * If host_names is not NULL, populate it with all the host names. Each string
+ * If host_names is not NULL, populate it with all the hostnames. Each string
* in the host_names is null-terminated and the last string is double
* null-terminated. If no DNS names are found, a single null-terminated empty
* string is returned.
@@ -346,6 +346,12 @@ static DWORD cert_get_name_string(struct Curl_easy *data,
DWORD length)
{
DWORD actual_length = 0;
+#if defined(CURL_WINDOWS_APP)
+ (void)data;
+ (void)cert_context;
+ (void)host_names;
+ (void)length;
+#else
BOOL compute_content = FALSE;
CERT_INFO *cert_info = NULL;
CERT_EXTENSION *extension = NULL;
@@ -441,14 +447,14 @@ static DWORD cert_get_name_string(struct Curl_easy *data,
}
/* Sanity check to prevent buffer overrun. */
if((actual_length + current_length) > length) {
- failf(data, "schannel: Not enough memory to list all host names.");
+ failf(data, "schannel: Not enough memory to list all hostnames.");
break;
}
dns_w = entry->pwszDNSName;
- /* pwszDNSName is in ia5 string format and hence doesn't contain any
+ /* pwszDNSName is in ia5 string format and hence does not contain any
* non-ascii characters. */
while(*dns_w != '\0') {
- *current_pos++ = (char)(*dns_w++);
+ *current_pos++ = (TCHAR)(*dns_w++);
}
*current_pos++ = '\0';
actual_length += (DWORD)current_length;
@@ -457,6 +463,7 @@ static DWORD cert_get_name_string(struct Curl_easy *data,
/* Last string has double null-terminator. */
*current_pos = '\0';
}
+#endif
return actual_length;
}
diff --git a/libs/libcurl/src/vtls/sectransp.c b/libs/libcurl/src/vtls/sectransp.c
index 6482fb9168..c79e18b95e 100644
--- a/libs/libcurl/src/vtls/sectransp.c
+++ b/libs/libcurl/src/vtls/sectransp.c
@@ -30,6 +30,8 @@
#include "curl_setup.h"
+#ifdef USE_SECTRANSP
+
#include "urldata.h" /* for the Curl_easy definition */
#include "curl_base64.h"
#include "strtok.h"
@@ -37,19 +39,16 @@
#include "strcase.h"
#include "x509asn1.h"
#include "strerror.h"
-
-#ifdef USE_SECTRANSP
+#include "cipher_suite.h"
#ifdef __clang__
#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wtautological-pointer-compare"
+#pragma clang diagnostic ignored "-Wunreachable-code"
#endif /* __clang__ */
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress"
-#pragma GCC diagnostic ignored "-Wundef"
-#pragma GCC diagnostic ignored "-Wunreachable-code"
#endif
#include <limits.h>
@@ -72,7 +71,7 @@
#if (TARGET_OS_MAC && !(TARGET_OS_EMBEDDED || TARGET_OS_IPHONE))
#if MAC_OS_X_VERSION_MAX_ALLOWED < 1050
-#error "The Secure Transport back-end requires Leopard or later."
+#error "The Secure Transport backend requires Leopard or later."
#endif /* MAC_OS_X_VERSION_MAX_ALLOWED < 1050 */
#define CURL_BUILD_IOS 0
@@ -122,7 +121,7 @@
#define CURL_SUPPORT_MAC_10_9 0
#else
-#error "The Secure Transport back-end requires iOS or macOS."
+#error "The Secure Transport backend requires iOS or macOS."
#endif /* (TARGET_OS_MAC && !(TARGET_OS_EMBEDDED || TARGET_OS_IPHONE)) */
#if CURL_BUILD_MAC
@@ -144,7 +143,8 @@
#include "memdebug.h"
-/* From MacTypes.h (which we can't include because it isn't present in iOS: */
+/* From MacTypes.h (which we cannot include because it is not present in
+ iOS: */
#define ioErr -36
#define paramErr -50
@@ -152,636 +152,60 @@ struct st_ssl_backend_data {
SSLContextRef ssl_ctx;
bool ssl_direction; /* true if writing, false if reading */
size_t ssl_write_buffered_length;
+ BIT(sent_shutdown);
};
-struct st_cipher {
- const char *name; /* Cipher suite IANA name. It starts with "TLS_" prefix */
- const char *alias_name; /* Alias name is the same as OpenSSL cipher name */
- SSLCipherSuite num; /* Cipher suite code/number defined in IANA registry */
- bool weak; /* Flag to mark cipher as weak based on previous implementation
- of Secure Transport back-end by CURL */
-};
-
-/* Macro to initialize st_cipher data structure: stringify id to name, cipher
- number/id, 'weak' suite flag
- */
-#define CIPHER_DEF(num, alias, weak) \
- { #num, alias, num, weak }
-
-/*
- Macro to initialize st_cipher data structure with name, code (IANA cipher
- number/id value), and 'weak' suite flag. The first 28 cipher suite numbers
- have the same IANA code for both SSL and TLS standards: numbers 0x0000 to
- 0x001B. They have different names though. The first 4 letters of the cipher
- suite name are the protocol name: "SSL_" or "TLS_", rest of the IANA name is
- the same for both SSL and TLS cipher suite name.
- The second part of the problem is that macOS/iOS SDKs don't define all TLS
- codes but only 12 of them. The SDK defines all SSL codes though, i.e. SSL_NUM
- constant is always defined for those 28 ciphers while TLS_NUM is defined only
- for 12 of the first 28 ciphers. Those 12 TLS cipher codes match to
- corresponding SSL enum value and represent the same cipher suite. Therefore
- we'll use the SSL enum value for those cipher suites because it is defined
- for all 28 of them.
- We make internal data consistent and based on TLS names, i.e. all st_cipher
- item names start with the "TLS_" prefix.
- Summarizing all the above, those 28 first ciphers are presented in our table
- with both TLS and SSL names. Their cipher numbers are assigned based on the
- SDK enum value for the SSL cipher, which matches to IANA TLS number.
+/* Create the list of default ciphers to use by making an intersection of the
+ * ciphers supported by Secure Transport and the list below, using the order
+ * of the former.
+ * This list is based on TLS recommendations by Mozilla, balancing between
+ * security and wide compatibility: "Most ciphers that are not clearly broken
+ * and dangerous to use are supported"
*/
-#define CIPHER_DEF_SSLTLS(num_wo_prefix, alias, weak) \
- { "TLS_" #num_wo_prefix, alias, SSL_##num_wo_prefix, weak }
-
-/*
- Cipher suites were marked as weak based on the following:
- RC4 encryption - rfc7465, the document contains a list of deprecated ciphers.
- Marked in the code below as weak.
- RC2 encryption - many mentions, was found vulnerable to a relatively easy
- attack https://link.springer.com/chapter/10.1007%2F3-540-69710-1_14
- Marked in the code below as weak.
- DES and IDEA encryption - rfc5469, has a list of deprecated ciphers.
- Marked in the code below as weak.
- Anonymous Diffie-Hellman authentication and anonymous elliptic curve
- Diffie-Hellman - vulnerable to a man-in-the-middle attack. Deprecated by
- RFC 4346 aka TLS 1.1 (section A.5, page 60)
- Null bulk encryption suites - not encrypted communication
- Export ciphers, i.e. ciphers with restrictions to be used outside the US for
- software exported to some countries, they were excluded from TLS 1.1
- version. More precisely, they were noted as ciphers which MUST NOT be
- negotiated in RFC 4346 aka TLS 1.1 (section A.5, pages 60 and 61).
- All of those filters were considered weak because they contain a weak
- algorithm like DES, RC2 or RC4, and already considered weak by other
- criteria.
- 3DES - NIST deprecated it and is going to retire it by 2023
- https://csrc.nist.gov/News/2017/Update-to-Current-Use-and-Deprecation-of-TDEA
- OpenSSL https://www.openssl.org/blog/blog/2016/08/24/sweet32/ also
- deprecated those ciphers. Some other libraries also consider it
- vulnerable or at least not strong enough.
-
- CBC ciphers are vulnerable with SSL3.0 and TLS1.0:
- https://www.cisco.com/c/en/us/support/docs/security/email-security-appliance
- /118518-technote-esa-00.html
- We don't take care of this issue because it is resolved by later TLS
- versions and for us, it requires more complicated checks, we need to
- check a protocol version also. Vulnerability doesn't look very critical
- and we do not filter out those cipher suites.
- */
-
-#define CIPHER_WEAK_NOT_ENCRYPTED TRUE
-#define CIPHER_WEAK_RC_ENCRYPTION TRUE
-#define CIPHER_WEAK_DES_ENCRYPTION TRUE
-#define CIPHER_WEAK_IDEA_ENCRYPTION TRUE
-#define CIPHER_WEAK_ANON_AUTH TRUE
-#define CIPHER_WEAK_3DES_ENCRYPTION TRUE
-#define CIPHER_STRONG_ENOUGH FALSE
-
-/* Please do not change the order of the first ciphers available for SSL.
- Do not insert and do not delete any of them. Code below
- depends on their order and continuity.
- If you add a new cipher, please maintain order by number, i.e.
- insert in between existing items to appropriate place based on
- cipher suite IANA number
-*/
-static const struct st_cipher ciphertable[] = {
- /* SSL version 3.0 and initial TLS 1.0 cipher suites.
- Defined since SDK 10.2.8 */
- CIPHER_DEF_SSLTLS(NULL_WITH_NULL_NULL, /* 0x0000 */
- NULL,
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF_SSLTLS(RSA_WITH_NULL_MD5, /* 0x0001 */
- "NULL-MD5",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF_SSLTLS(RSA_WITH_NULL_SHA, /* 0x0002 */
- "NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF_SSLTLS(RSA_EXPORT_WITH_RC4_40_MD5, /* 0x0003 */
- "EXP-RC4-MD5",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_WITH_RC4_128_MD5, /* 0x0004 */
- "RC4-MD5",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_WITH_RC4_128_SHA, /* 0x0005 */
- "RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_EXPORT_WITH_RC2_CBC_40_MD5, /* 0x0006 */
- "EXP-RC2-CBC-MD5",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_WITH_IDEA_CBC_SHA, /* 0x0007 */
- "IDEA-CBC-SHA",
- CIPHER_WEAK_IDEA_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_EXPORT_WITH_DES40_CBC_SHA, /* 0x0008 */
- "EXP-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_WITH_DES_CBC_SHA, /* 0x0009 */
- "DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(RSA_WITH_3DES_EDE_CBC_SHA, /* 0x000A */
- "DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_DSS_EXPORT_WITH_DES40_CBC_SHA, /* 0x000B */
- "EXP-DH-DSS-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_DSS_WITH_DES_CBC_SHA, /* 0x000C */
- "DH-DSS-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_DSS_WITH_3DES_EDE_CBC_SHA, /* 0x000D */
- "DH-DSS-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_RSA_EXPORT_WITH_DES40_CBC_SHA, /* 0x000E */
- "EXP-DH-RSA-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_RSA_WITH_DES_CBC_SHA, /* 0x000F */
- "DH-RSA-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_RSA_WITH_3DES_EDE_CBC_SHA, /* 0x0010 */
- "DH-RSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, /* 0x0011 */
- "EXP-EDH-DSS-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_DSS_WITH_DES_CBC_SHA, /* 0x0012 */
- "EDH-DSS-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_DSS_WITH_3DES_EDE_CBC_SHA, /* 0x0013 */
- "DHE-DSS-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, /* 0x0014 */
- "EXP-EDH-RSA-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_RSA_WITH_DES_CBC_SHA, /* 0x0015 */
- "EDH-RSA-DES-CBC-SHA",
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DHE_RSA_WITH_3DES_EDE_CBC_SHA, /* 0x0016 */
- "DHE-RSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF_SSLTLS(DH_anon_EXPORT_WITH_RC4_40_MD5, /* 0x0017 */
- "EXP-ADH-RC4-MD5",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF_SSLTLS(DH_anon_WITH_RC4_128_MD5, /* 0x0018 */
- "ADH-RC4-MD5",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF_SSLTLS(DH_anon_EXPORT_WITH_DES40_CBC_SHA, /* 0x0019 */
- "EXP-ADH-DES-CBC-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF_SSLTLS(DH_anon_WITH_DES_CBC_SHA, /* 0x001A */
- "ADH-DES-CBC-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF_SSLTLS(DH_anon_WITH_3DES_EDE_CBC_SHA, /* 0x001B */
- "ADH-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(SSL_FORTEZZA_DMS_WITH_NULL_SHA, /* 0x001C */
- NULL,
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA, /* 0x001D */
- NULL,
- CIPHER_STRONG_ENOUGH),
-
-#if CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7
- /* RFC 4785 - Pre-Shared Key (PSK) Ciphersuites with NULL Encryption */
- CIPHER_DEF(TLS_PSK_WITH_NULL_SHA, /* 0x002C */
- "PSK-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_DHE_PSK_WITH_NULL_SHA, /* 0x002D */
- "DHE-PSK-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_RSA_PSK_WITH_NULL_SHA, /* 0x002E */
- "RSA-PSK-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
-#endif /* CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7 */
-
- /* TLS addenda using AES, per RFC 3268. Defined since SDK 10.4u */
- CIPHER_DEF(TLS_RSA_WITH_AES_128_CBC_SHA, /* 0x002F */
- "AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_128_CBC_SHA, /* 0x0030 */
- "DH-DSS-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_128_CBC_SHA, /* 0x0031 */
- "DH-RSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_128_CBC_SHA, /* 0x0032 */
- "DHE-DSS-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_128_CBC_SHA, /* 0x0033 */
- "DHE-RSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_128_CBC_SHA, /* 0x0034 */
- "ADH-AES128-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_RSA_WITH_AES_256_CBC_SHA, /* 0x0035 */
- "AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_256_CBC_SHA, /* 0x0036 */
- "DH-DSS-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_256_CBC_SHA, /* 0x0037 */
- "DH-RSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_256_CBC_SHA, /* 0x0038 */
- "DHE-DSS-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_256_CBC_SHA, /* 0x0039 */
- "DHE-RSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_256_CBC_SHA, /* 0x003A */
- "ADH-AES256-SHA",
- CIPHER_WEAK_ANON_AUTH),
-
-#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- /* TLS 1.2 addenda, RFC 5246 */
- /* Server provided RSA certificate for key exchange. */
- CIPHER_DEF(TLS_RSA_WITH_NULL_SHA256, /* 0x003B */
- "NULL-SHA256",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_RSA_WITH_AES_128_CBC_SHA256, /* 0x003C */
- "AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_WITH_AES_256_CBC_SHA256, /* 0x003D */
- "AES256-SHA256",
- CIPHER_STRONG_ENOUGH),
- /* Server-authenticated (and optionally client-authenticated)
- Diffie-Hellman. */
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_128_CBC_SHA256, /* 0x003E */
- "DH-DSS-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_128_CBC_SHA256, /* 0x003F */
- "DH-RSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, /* 0x0040 */
- "DHE-DSS-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
-
- /* TLS 1.2 addenda, RFC 5246 */
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, /* 0x0067 */
- "DHE-RSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_256_CBC_SHA256, /* 0x0068 */
- "DH-DSS-AES256-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_256_CBC_SHA256, /* 0x0069 */
- "DH-RSA-AES256-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, /* 0x006A */
- "DHE-DSS-AES256-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, /* 0x006B */
- "DHE-RSA-AES256-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_128_CBC_SHA256, /* 0x006C */
- "ADH-AES128-SHA256",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_256_CBC_SHA256, /* 0x006D */
- "ADH-AES256-SHA256",
- CIPHER_WEAK_ANON_AUTH),
-#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
-
-#if CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7
- /* Addendum from RFC 4279, TLS PSK */
- CIPHER_DEF(TLS_PSK_WITH_RC4_128_SHA, /* 0x008A */
- "PSK-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_PSK_WITH_3DES_EDE_CBC_SHA, /* 0x008B */
- "PSK-3DES-EDE-CBC-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_PSK_WITH_AES_128_CBC_SHA, /* 0x008C */
- "PSK-AES128-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_PSK_WITH_AES_256_CBC_SHA, /* 0x008D */
- "PSK-AES256-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_RC4_128_SHA, /* 0x008E */
- "DHE-PSK-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, /* 0x008F */
- "DHE-PSK-3DES-EDE-CBC-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_128_CBC_SHA, /* 0x0090 */
- "DHE-PSK-AES128-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_256_CBC_SHA, /* 0x0091 */
- "DHE-PSK-AES256-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_RC4_128_SHA, /* 0x0092 */
- "RSA-PSK-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, /* 0x0093 */
- "RSA-PSK-3DES-EDE-CBC-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_128_CBC_SHA, /* 0x0094 */
- "RSA-PSK-AES128-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_256_CBC_SHA, /* 0x0095 */
- "RSA-PSK-AES256-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
-#endif /* CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7 */
-
-#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- /* Addenda from rfc 5288 AES Galois Counter Mode (GCM) Cipher Suites
- for TLS. */
- CIPHER_DEF(TLS_RSA_WITH_AES_128_GCM_SHA256, /* 0x009C */
- "AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_WITH_AES_256_GCM_SHA384, /* 0x009D */
- "AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, /* 0x009E */
- "DHE-RSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, /* 0x009F */
- "DHE-RSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_128_GCM_SHA256, /* 0x00A0 */
- "DH-RSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_RSA_WITH_AES_256_GCM_SHA384, /* 0x00A1 */
- "DH-RSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, /* 0x00A2 */
- "DHE-DSS-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, /* 0x00A3 */
- "DHE-DSS-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_128_GCM_SHA256, /* 0x00A4 */
- "DH-DSS-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_DSS_WITH_AES_256_GCM_SHA384, /* 0x00A5 */
- "DH-DSS-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_128_GCM_SHA256, /* 0x00A6 */
- "ADH-AES128-GCM-SHA256",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_DH_anon_WITH_AES_256_GCM_SHA384, /* 0x00A7 */
- "ADH-AES256-GCM-SHA384",
- CIPHER_WEAK_ANON_AUTH),
-#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
-
-#if CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7
- /* RFC 5487 - PSK with SHA-256/384 and AES GCM */
- CIPHER_DEF(TLS_PSK_WITH_AES_128_GCM_SHA256, /* 0x00A8 */
- "PSK-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_PSK_WITH_AES_256_GCM_SHA384, /* 0x00A9 */
- "PSK-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_128_GCM_SHA256, /* 0x00AA */
- "DHE-PSK-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_256_GCM_SHA384, /* 0x00AB */
- "DHE-PSK-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, /* 0x00AC */
- "RSA-PSK-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, /* 0x00AD */
- "RSA-PSK-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_PSK_WITH_AES_128_CBC_SHA256, /* 0x00AE */
- "PSK-AES128-CBC-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_PSK_WITH_AES_256_CBC_SHA384, /* 0x00AF */
- "PSK-AES256-CBC-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_PSK_WITH_NULL_SHA256, /* 0x00B0 */
- "PSK-NULL-SHA256",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_PSK_WITH_NULL_SHA384, /* 0x00B1 */
- "PSK-NULL-SHA384",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, /* 0x00B2 */
- "DHE-PSK-AES128-CBC-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, /* 0x00B3 */
- "DHE-PSK-AES256-CBC-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_DHE_PSK_WITH_NULL_SHA256, /* 0x00B4 */
- "DHE-PSK-NULL-SHA256",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_DHE_PSK_WITH_NULL_SHA384, /* 0x00B5 */
- "DHE-PSK-NULL-SHA384",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, /* 0x00B6 */
- "RSA-PSK-AES128-CBC-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, /* 0x00B7 */
- "RSA-PSK-AES256-CBC-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_RSA_PSK_WITH_NULL_SHA256, /* 0x00B8 */
- "RSA-PSK-NULL-SHA256",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_RSA_PSK_WITH_NULL_SHA384, /* 0x00B9 */
- "RSA-PSK-NULL-SHA384",
- CIPHER_WEAK_NOT_ENCRYPTED),
-#endif /* CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7 */
-
- /* RFC 5746 - Secure Renegotiation. This is not a real suite,
- it is a response to initiate negotiation again */
- CIPHER_DEF(TLS_EMPTY_RENEGOTIATION_INFO_SCSV, /* 0x00FF */
- NULL,
- CIPHER_STRONG_ENOUGH),
-
-#if CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11
- /* TLS 1.3 standard cipher suites for ChaCha20+Poly1305.
- Note: TLS 1.3 ciphersuites do not specify the key exchange
- algorithm -- they only specify the symmetric ciphers.
- Cipher alias name matches to OpenSSL cipher name, and for
- TLS 1.3 ciphers */
- CIPHER_DEF(TLS_AES_128_GCM_SHA256, /* 0x1301 */
- NULL, /* The OpenSSL cipher name matches to the IANA name */
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_AES_256_GCM_SHA384, /* 0x1302 */
- NULL, /* The OpenSSL cipher name matches to the IANA name */
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_CHACHA20_POLY1305_SHA256, /* 0x1303 */
- NULL, /* The OpenSSL cipher name matches to the IANA name */
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_AES_128_CCM_SHA256, /* 0x1304 */
- NULL, /* The OpenSSL cipher name matches to the IANA name */
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_AES_128_CCM_8_SHA256, /* 0x1305 */
- NULL, /* The OpenSSL cipher name matches to the IANA name */
- CIPHER_STRONG_ENOUGH),
-#endif /* CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11 */
+static const uint16_t default_ciphers[] = {
+ TLS_RSA_WITH_3DES_EDE_CBC_SHA, /* 0x000A */
+ TLS_RSA_WITH_AES_128_CBC_SHA, /* 0x002F */
+ TLS_RSA_WITH_AES_256_CBC_SHA, /* 0x0035 */
#if CURL_BUILD_MAC_10_6 || CURL_BUILD_IOS
- /* ECDSA addenda, RFC 4492 */
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_NULL_SHA, /* 0xC001 */
- "ECDH-ECDSA-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_RC4_128_SHA, /* 0xC002 */
- "ECDH-ECDSA-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, /* 0xC003 */
- "ECDH-ECDSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, /* 0xC004 */
- "ECDH-ECDSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, /* 0xC005 */
- "ECDH-ECDSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_NULL_SHA, /* 0xC006 */
- "ECDHE-ECDSA-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, /* 0xC007 */
- "ECDHE-ECDSA-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, /* 0xC008 */
- "ECDHE-ECDSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, /* 0xC009 */
- "ECDHE-ECDSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, /* 0xC00A */
- "ECDHE-ECDSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_NULL_SHA, /* 0xC00B */
- "ECDH-RSA-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_RC4_128_SHA, /* 0xC00C */
- "ECDH-RSA-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, /* 0xC00D */
- "ECDH-RSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, /* 0xC00E */
- "ECDH-RSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, /* 0xC00F */
- "ECDH-RSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_NULL_SHA, /* 0xC010 */
- "ECDHE-RSA-NULL-SHA",
- CIPHER_WEAK_NOT_ENCRYPTED),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_RC4_128_SHA, /* 0xC011 */
- "ECDHE-RSA-RC4-SHA",
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, /* 0xC012 */
- "ECDHE-RSA-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, /* 0xC013 */
- "ECDHE-RSA-AES128-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, /* 0xC014 */
- "ECDHE-RSA-AES256-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_anon_WITH_NULL_SHA, /* 0xC015 */
- "AECDH-NULL-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_ECDH_anon_WITH_RC4_128_SHA, /* 0xC016 */
- "AECDH-RC4-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, /* 0xC017 */
- "AECDH-DES-CBC3-SHA",
- CIPHER_WEAK_3DES_ENCRYPTION),
- CIPHER_DEF(TLS_ECDH_anon_WITH_AES_128_CBC_SHA, /* 0xC018 */
- "AECDH-AES128-SHA",
- CIPHER_WEAK_ANON_AUTH),
- CIPHER_DEF(TLS_ECDH_anon_WITH_AES_256_CBC_SHA, /* 0xC019 */
- "AECDH-AES256-SHA",
- CIPHER_WEAK_ANON_AUTH),
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, /* 0xC009 */
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, /* 0xC00A */
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, /* 0xC013 */
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, /* 0xC014 */
#endif /* CURL_BUILD_MAC_10_6 || CURL_BUILD_IOS */
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- /* Addenda from rfc 5289 Elliptic Curve Cipher Suites with
- HMAC SHA-256/384. */
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, /* 0xC023 */
- "ECDHE-ECDSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, /* 0xC024 */
- "ECDHE-ECDSA-AES256-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, /* 0xC025 */
- "ECDH-ECDSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, /* 0xC026 */
- "ECDH-ECDSA-AES256-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, /* 0xC027 */
- "ECDHE-RSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, /* 0xC028 */
- "ECDHE-RSA-AES256-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, /* 0xC029 */
- "ECDH-RSA-AES128-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, /* 0xC02A */
- "ECDH-RSA-AES256-SHA384",
- CIPHER_STRONG_ENOUGH),
- /* Addenda from rfc 5289 Elliptic Curve Cipher Suites with
- SHA-256/384 and AES Galois Counter Mode (GCM) */
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* 0xC02B */
- "ECDHE-ECDSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* 0xC02C */
- "ECDHE-ECDSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, /* 0xC02D */
- "ECDH-ECDSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, /* 0xC02E */
- "ECDH-ECDSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* 0xC02F */
- "ECDHE-RSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* 0xC030 */
- "ECDHE-RSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, /* 0xC031 */
- "ECDH-RSA-AES128-GCM-SHA256",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, /* 0xC032 */
- "ECDH-RSA-AES256-GCM-SHA384",
- CIPHER_STRONG_ENOUGH),
+ TLS_RSA_WITH_AES_128_CBC_SHA256, /* 0x003C */
+ TLS_RSA_WITH_AES_256_CBC_SHA256, /* 0x003D */
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, /* 0x0067 */
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, /* 0x006B */
+ TLS_RSA_WITH_AES_128_GCM_SHA256, /* 0x009C */
+ TLS_RSA_WITH_AES_256_GCM_SHA384, /* 0x009D */
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, /* 0x009E */
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, /* 0x009F */
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, /* 0xC023 */
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, /* 0xC024 */
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, /* 0xC027 */
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, /* 0xC028 */
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* 0xC02B */
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* 0xC02C */
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* 0xC02F */
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* 0xC030 */
#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
-#if CURL_BUILD_MAC_10_15 || CURL_BUILD_IOS_13
- /* ECDHE_PSK Cipher Suites for Transport Layer Security (TLS), RFC 5489 */
- CIPHER_DEF(TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, /* 0xC035 */
- "ECDHE-PSK-AES128-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, /* 0xC036 */
- "ECDHE-PSK-AES256-CBC-SHA",
- CIPHER_STRONG_ENOUGH),
-#endif /* CURL_BUILD_MAC_10_15 || CURL_BUILD_IOS_13 */
-
#if CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11
- /* Addenda from rfc 7905 ChaCha20-Poly1305 Cipher Suites for
- Transport Layer Security (TLS). */
- CIPHER_DEF(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, /* 0xCCA8 */
- "ECDHE-RSA-CHACHA20-POLY1305",
- CIPHER_STRONG_ENOUGH),
- CIPHER_DEF(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, /* 0xCCA9 */
- "ECDHE-ECDSA-CHACHA20-POLY1305",
- CIPHER_STRONG_ENOUGH),
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, /* 0xCCA8 */
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, /* 0xCCA9 */
+
+ /* TLSv1.3 is not supported by sectransp, but there is also other
+ * code referencing TLSv1.3, like: kTLSProtocol13 ? */
+ TLS_AES_128_GCM_SHA256, /* 0x1301 */
+ TLS_AES_256_GCM_SHA384, /* 0x1302 */
+ TLS_CHACHA20_POLY1305_SHA256, /* 0x1303 */
#endif /* CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11 */
-
-#if CURL_BUILD_MAC_10_15 || CURL_BUILD_IOS_13
- /* ChaCha20-Poly1305 Cipher Suites for Transport Layer Security (TLS),
- RFC 7905 */
- CIPHER_DEF(TLS_PSK_WITH_CHACHA20_POLY1305_SHA256, /* 0xCCAB */
- "PSK-CHACHA20-POLY1305",
- CIPHER_STRONG_ENOUGH),
-#endif /* CURL_BUILD_MAC_10_15 || CURL_BUILD_IOS_13 */
-
- /* Tags for SSL 2 cipher kinds which are not specified for SSL 3.
- Defined since SDK 10.2.8 */
- CIPHER_DEF(SSL_RSA_WITH_RC2_CBC_MD5, /* 0xFF80 */
- NULL,
- CIPHER_WEAK_RC_ENCRYPTION),
- CIPHER_DEF(SSL_RSA_WITH_IDEA_CBC_MD5, /* 0xFF81 */
- NULL,
- CIPHER_WEAK_IDEA_ENCRYPTION),
- CIPHER_DEF(SSL_RSA_WITH_DES_CBC_MD5, /* 0xFF82 */
- NULL,
- CIPHER_WEAK_DES_ENCRYPTION),
- CIPHER_DEF(SSL_RSA_WITH_3DES_EDE_CBC_MD5, /* 0xFF83 */
- NULL,
- CIPHER_WEAK_3DES_ENCRYPTION),
};
-#define NUM_OF_CIPHERS sizeof(ciphertable)/sizeof(ciphertable[0])
+#define DEFAULT_CIPHERS_LEN sizeof(default_ciphers)/sizeof(default_ciphers[0])
/* pinned public key support tests */
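The comment introducing default_ciphers above says the effective default list is the intersection of the suites Secure Transport reports as supported and this table, keeping the order of the supported list. A minimal sketch of that intersection (hypothetical helper, not the code that follows in sectransp.c; 'out' is assumed to hold at least n_supported entries):

/* Hedged sketch: keep only supported suites that also appear in
 * default_ciphers, preserving the supported list's order. */
static size_t intersect_default_ciphers(const SSLCipherSuite *supported,
                                        size_t n_supported,
                                        SSLCipherSuite *out)
{
  size_t i, j, n = 0;
  for(i = 0; i < n_supported; i++) {
    for(j = 0; j < DEFAULT_CIPHERS_LEN; j++) {
      if(supported[i] == default_ciphers[j]) {
        out[n++] = supported[i];
        break;
      }
    }
  }
  return n;
}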
@@ -816,7 +240,7 @@ static const unsigned char rsa2048SpkiHeader[] = {
0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
0x00, 0x03, 0x82, 0x01, 0x0f, 0x00};
#ifdef SECTRANSP_PINNEDPUBKEY_V1
-/* the *new* version doesn't return DER encoded ecdsa certs like the old... */
+/* the *new* version does not return DER encoded ecdsa certs like the old... */
static const unsigned char ecDsaSecp256r1SpkiHeader[] = {
0x30, 0x59, 0x30, 0x13, 0x06, 0x07,
0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02,
@@ -906,25 +330,6 @@ static OSStatus sectransp_bio_cf_out_write(SSLConnectionRef connection,
return rtn;
}
-CF_INLINE const char *TLSCipherNameForNumber(SSLCipherSuite cipher)
-{
- /* The first ciphers in the ciphertable are continuous. Here we do small
- optimization and instead of loop directly get SSL name by cipher number.
- */
- size_t i;
- if(cipher <= SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA) {
- return ciphertable[cipher].name;
- }
- /* Iterate through the rest of the ciphers */
- for(i = SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA + 1; i < NUM_OF_CIPHERS;
- ++i) {
- if(ciphertable[i].num == cipher) {
- return ciphertable[i].name;
- }
- }
- return ciphertable[SSL_NULL_WITH_NULL_NULL].name;
-}
-
#if CURL_BUILD_MAC
CF_INLINE void GetDarwinVersionNumber(int *major, int *minor)
{
@@ -957,27 +362,27 @@ CF_INLINE void GetDarwinVersionNumber(int *major, int *minor)
#endif /* CURL_BUILD_MAC */
/* Apple provides a myriad of ways of getting information about a certificate
- into a string. Some aren't available under iOS or newer cats. So here's
- a unified function for getting a string describing the certificate that
- ought to work in all cats starting with Leopard. */
+ into a string. Some are not available under iOS or newer cats. Here's a
+ unified function for getting a string describing the certificate that ought
+ to work in all cats starting with Leopard. */
CF_INLINE CFStringRef getsubject(SecCertificateRef cert)
{
CFStringRef server_cert_summary = CFSTR("(null)");
#if CURL_BUILD_IOS
- /* iOS: There's only one way to do this. */
+ /* iOS: There is only one way to do this. */
server_cert_summary = SecCertificateCopySubjectSummary(cert);
#else
#if CURL_BUILD_MAC_10_7
/* Lion & later: Get the long description if we can. */
- if(SecCertificateCopyLongDescription)
+ if(&SecCertificateCopyLongDescription)
server_cert_summary =
SecCertificateCopyLongDescription(NULL, cert, NULL);
else
#endif /* CURL_BUILD_MAC_10_7 */
#if CURL_BUILD_MAC_10_6
/* Snow Leopard: Get the certificate summary. */
- if(SecCertificateCopySubjectSummary)
+ if(&SecCertificateCopySubjectSummary)
server_cert_summary = SecCertificateCopySubjectSummary(cert);
else
#endif /* CURL_BUILD_MAC_10_6 */
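The hunks here rewrite the runtime availability checks to take the address of the weakly linked Security framework symbols, for example if(&SecCertificateCopyLongDescription), which makes the NULL test on the weak symbol explicit; dropping the -Wtautological-pointer-compare pragma earlier in this file is consistent with that. A minimal sketch of the pattern (function name is illustrative):

/* Illustrative only: call a weakly linked symbol only if the loader resolved
 * it at runtime; its address is NULL on OS versions that lack it. */
static CFStringRef example_cert_summary(SecCertificateRef cert)
{
#if CURL_BUILD_MAC_10_7
  if(&SecCertificateCopyLongDescription)
    return SecCertificateCopyLongDescription(NULL, cert, NULL);
#endif
  return SecCertificateCopySubjectSummary(cert);
}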
@@ -1015,7 +420,7 @@ static CURLcode CopyCertSubject(struct Curl_easy *data,
size_t cbuf_size = ((size_t)CFStringGetLength(c) * 4) + 1;
cbuf = calloc(1, cbuf_size);
if(cbuf) {
- if(!CFStringGetCString(c, cbuf, cbuf_size,
+ if(!CFStringGetCString(c, cbuf, (CFIndex)cbuf_size,
kCFStringEncodingUTF8)) {
failf(data, "SSL: invalid CA certificate subject");
result = CURLE_PEER_FAILED_VERIFICATION;
@@ -1025,7 +430,7 @@ static CURLcode CopyCertSubject(struct Curl_easy *data,
*certp = cbuf;
}
else {
- failf(data, "SSL: couldn't allocate %zu bytes of memory", cbuf_size);
+ failf(data, "SSL: could not allocate %zu bytes of memory", cbuf_size);
result = CURLE_OUT_OF_MEMORY;
}
}
@@ -1037,7 +442,7 @@ static CURLcode CopyCertSubject(struct Curl_easy *data,
#if CURL_SUPPORT_MAC_10_6
/* The SecKeychainSearch API was deprecated in Lion, and using it will raise
- deprecation warnings, so let's not compile this unless it's necessary: */
+ deprecation warnings, so let's not compile this unless it is necessary: */
static OSStatus CopyIdentityWithLabelOldSchool(char *label,
SecIdentityRef *out_c_a_k)
{
@@ -1090,7 +495,7 @@ static OSStatus CopyIdentityWithLabel(char *label,
/* SecItemCopyMatching() was introduced in iOS and Snow Leopard.
kSecClassIdentity was introduced in Lion. If both exist, let's use them
to find the certificate. */
- if(SecItemCopyMatching && kSecClassIdentity) {
+ if(&SecItemCopyMatching && kSecClassIdentity) {
CFTypeRef keys[5];
CFTypeRef values[5];
CFDictionaryRef query_dict;
@@ -1108,7 +513,7 @@ static OSStatus CopyIdentityWithLabel(char *label,
/* identity searches need a SecPolicyRef in order to work */
values[3] = SecPolicyCreateSSL(false, NULL);
keys[3] = kSecMatchPolicy;
- /* match the name of the certificate (doesn't work in macOS 10.12.1) */
+ /* match the name of the certificate (does not work in macOS 10.12.1) */
values[4] = label_cf;
keys[4] = kSecAttrLabel;
query_dict = CFDictionaryCreate(NULL, (const void **)keys,
@@ -1120,7 +525,7 @@ static OSStatus CopyIdentityWithLabel(char *label,
/* Do we have a match? */
status = SecItemCopyMatching(query_dict, (CFTypeRef *) &keys_list);
- /* Because kSecAttrLabel matching doesn't work with kSecClassIdentity,
+ /* Because kSecAttrLabel matching does not work with kSecClassIdentity,
* we need to find the correct identity ourselves */
if(status == noErr) {
keys_list_count = CFArrayGetCount(keys_list);
@@ -1194,7 +599,8 @@ static OSStatus CopyIdentityFromPKCS12File(const char *cPath,
if(blob) {
pkcs_data = CFDataCreate(kCFAllocatorDefault,
- (const unsigned char *)blob->data, blob->len);
+ (const unsigned char *)blob->data,
+ (CFIndex)blob->len);
status = (pkcs_data != NULL) ? errSecSuccess : errSecAllocate;
resource_imported = (pkcs_data != NULL);
}
@@ -1202,7 +608,7 @@ static OSStatus CopyIdentityFromPKCS12File(const char *cPath,
pkcs_url =
CFURLCreateFromFileSystemRepresentation(NULL,
(const UInt8 *)cPath,
- strlen(cPath), false);
+ (CFIndex)strlen(cPath), false);
resource_imported =
CFURLCreateDataAndPropertiesFromResource(NULL,
pkcs_url, &pkcs_data,
@@ -1231,7 +637,7 @@ static OSStatus CopyIdentityFromPKCS12File(const char *cPath,
/* On macOS SecPKCS12Import will always add the client certificate to
* the Keychain.
*
- * As this doesn't match iOS, and apps may not want to see their client
+ * As this does not match iOS, and apps may not want to see their client
* certificate saved in the user's keychain, we use SecItemImport
* with a NULL keychain to avoid importing it.
*
@@ -1327,13 +733,14 @@ static CURLcode sectransp_version_from_curl(SSLProtocol *darwinver,
return CURLE_OK;
case CURL_SSLVERSION_TLSv1_3:
/* TLS 1.3 support first appeared in iOS 11 and macOS 10.13 */
-#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
+#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+ defined(HAVE_BUILTIN_AVAILABLE)
if(__builtin_available(macOS 10.13, iOS 11.0, *)) {
*darwinver = kTLSProtocol13;
return CURLE_OK;
}
#endif /* (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) &&
- HAVE_BUILTIN_AVAILABLE == 1 */
+ defined(HAVE_BUILTIN_AVAILABLE) */
break;
}
return CURLE_SSL_CONNECT_ERROR;
@@ -1356,7 +763,8 @@ static CURLcode set_ssl_version_min_max(struct Curl_cfilter *cf,
/* macOS 10.5-10.7 supported TLS 1.0 only.
macOS 10.8 and later, and iOS 5 and later, added TLS 1.1 and 1.2.
macOS 10.13 and later, and iOS 11 and later, added TLS 1.3. */
-#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
+#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+ defined(HAVE_BUILTIN_AVAILABLE)
if(__builtin_available(macOS 10.13, iOS 11.0, *)) {
max_supported_version_by_os = CURL_SSLVERSION_MAX_TLSv1_3;
}
@@ -1366,7 +774,7 @@ static CURLcode set_ssl_version_min_max(struct Curl_cfilter *cf,
#else
max_supported_version_by_os = CURL_SSLVERSION_MAX_TLSv1_2;
#endif /* (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) &&
- HAVE_BUILTIN_AVAILABLE == 1 */
+ defined(HAVE_BUILTIN_AVAILABLE) */
switch(ssl_version) {
case CURL_SSLVERSION_DEFAULT:
@@ -1383,7 +791,7 @@ static CURLcode set_ssl_version_min_max(struct Curl_cfilter *cf,
}
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- if(SSLSetProtocolVersionMax) {
+ if(&SSLSetProtocolVersionMax) {
SSLProtocol darwin_ver_min = kTLSProtocol1;
SSLProtocol darwin_ver_max = kTLSProtocol1;
CURLcode result = sectransp_version_from_curl(&darwin_ver_min,
@@ -1439,208 +847,223 @@ static CURLcode set_ssl_version_min_max(struct Curl_cfilter *cf,
return CURLE_SSL_CONNECT_ERROR;
}
-static bool is_cipher_suite_strong(SSLCipherSuite suite_num)
+static int sectransp_cipher_suite_get_str(uint16_t id, char *buf,
+ size_t buf_size, bool prefer_rfc)
{
- size_t i;
- for(i = 0; i < NUM_OF_CIPHERS; ++i) {
- if(ciphertable[i].num == suite_num) {
- return !ciphertable[i].weak;
- }
- }
- /* If the cipher is not in our list, assume it is a new one
- and therefore strong. Previous implementation was the same,
- if cipher suite is not in the list, it was considered strong enough */
- return true;
+ /* are these fortezza suites even supported ? */
+ if(id == SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA)
+ msnprintf(buf, buf_size, "%s", "SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA");
+ else if(id == SSL_FORTEZZA_DMS_WITH_NULL_SHA)
+ msnprintf(buf, buf_size, "%s", "SSL_FORTEZZA_DMS_WITH_NULL_SHA");
+ /* can TLS_EMPTY_RENEGOTIATION_INFO_SCSV even be set ? */
+ else if(id == TLS_EMPTY_RENEGOTIATION_INFO_SCSV)
+ msnprintf(buf, buf_size, "%s", "TLS_EMPTY_RENEGOTIATION_INFO_SCSV");
+ /* do we still need to support these SSL2-only ciphers ? */
+ else if(id == SSL_RSA_WITH_RC2_CBC_MD5)
+ msnprintf(buf, buf_size, "%s", "SSL_RSA_WITH_RC2_CBC_MD5");
+ else if(id == SSL_RSA_WITH_IDEA_CBC_MD5)
+ msnprintf(buf, buf_size, "%s", "SSL_RSA_WITH_IDEA_CBC_MD5");
+ else if(id == SSL_RSA_WITH_DES_CBC_MD5)
+ msnprintf(buf, buf_size, "%s", "SSL_RSA_WITH_DES_CBC_MD5");
+ else if(id == SSL_RSA_WITH_3DES_EDE_CBC_MD5)
+ msnprintf(buf, buf_size, "%s", "SSL_RSA_WITH_3DES_EDE_CBC_MD5");
+ else
+ return Curl_cipher_suite_get_str(id, buf, buf_size, prefer_rfc);
+ return 0;
}
-static bool sectransp_is_separator(char c)
+static uint16_t sectransp_cipher_suite_walk_str(const char **str,
+ const char **end)
{
- /* Return whether character is a cipher list separator. */
- switch(c) {
- case ' ':
- case '\t':
- case ':':
- case ',':
- case ';':
- return true;
- }
- return false;
+ uint16_t id = Curl_cipher_suite_walk_str(str, end);
+ size_t len = *end - *str;
+
+ if(!id) {
+ /* are these fortezza suites even supported ? */
+ if(strncasecompare("SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA", *str, len))
+ id = SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA;
+ else if(strncasecompare("SSL_FORTEZZA_DMS_WITH_NULL_SHA", *str, len))
+ id = SSL_FORTEZZA_DMS_WITH_NULL_SHA;
+ /* can TLS_EMPTY_RENEGOTIATION_INFO_SCSV even be set ? */
+ else if(strncasecompare("TLS_EMPTY_RENEGOTIATION_INFO_SCSV", *str, len))
+ id = TLS_EMPTY_RENEGOTIATION_INFO_SCSV;
+ /* do we still need to support these SSL2-only ciphers ? */
+ else if(strncasecompare("SSL_RSA_WITH_RC2_CBC_MD5", *str, len))
+ id = SSL_RSA_WITH_RC2_CBC_MD5;
+ else if(strncasecompare("SSL_RSA_WITH_IDEA_CBC_MD5", *str, len))
+ id = SSL_RSA_WITH_IDEA_CBC_MD5;
+ else if(strncasecompare("SSL_RSA_WITH_DES_CBC_MD5", *str, len))
+ id = SSL_RSA_WITH_DES_CBC_MD5;
+ else if(strncasecompare("SSL_RSA_WITH_3DES_EDE_CBC_MD5", *str, len))
+ id = SSL_RSA_WITH_3DES_EDE_CBC_MD5;
+ }
+ return id;
}
-static CURLcode sectransp_set_default_ciphers(struct Curl_easy *data,
- SSLContextRef ssl_ctx)
+/* allocated memory must be freed */
+static SSLCipherSuite * sectransp_get_supported_ciphers(SSLContextRef ssl_ctx,
+ size_t *len)
{
- size_t all_ciphers_count = 0UL, allowed_ciphers_count = 0UL, i;
- SSLCipherSuite *all_ciphers = NULL, *allowed_ciphers = NULL;
+ SSLCipherSuite *ciphers = NULL;
OSStatus err = noErr;
+ *len = 0;
-#if CURL_BUILD_MAC
- int darwinver_maj = 0, darwinver_min = 0;
+ err = SSLGetNumberSupportedCiphers(ssl_ctx, len);
+ if(err != noErr)
+ goto failed;
- GetDarwinVersionNumber(&darwinver_maj, &darwinver_min);
-#endif /* CURL_BUILD_MAC */
+ ciphers = malloc(*len * sizeof(SSLCipherSuite));
+ if(!ciphers)
+ goto failed;
+
+ err = SSLGetSupportedCiphers(ssl_ctx, ciphers, len);
+ if(err != noErr)
+ goto failed;
- /* Disable cipher suites that ST supports but are not safe. These ciphers
- are unlikely to be used in any case since ST gives other ciphers a much
- higher priority, but it's probably better that we not connect at all than
- to give the user a false sense of security if the server only supports
- insecure ciphers. (Note: We don't care about SSLv2-only ciphers.) */
- err = SSLGetNumberSupportedCiphers(ssl_ctx, &all_ciphers_count);
- if(err != noErr) {
- failf(data, "SSL: SSLGetNumberSupportedCiphers() failed: OSStatus %d",
- err);
- return CURLE_SSL_CIPHER;
- }
- all_ciphers = malloc(all_ciphers_count*sizeof(SSLCipherSuite));
- if(!all_ciphers) {
- failf(data, "SSL: Failed to allocate memory for all ciphers");
- return CURLE_OUT_OF_MEMORY;
- }
- allowed_ciphers = malloc(all_ciphers_count*sizeof(SSLCipherSuite));
- if(!allowed_ciphers) {
- Curl_safefree(all_ciphers);
- failf(data, "SSL: Failed to allocate memory for allowed ciphers");
- return CURLE_OUT_OF_MEMORY;
- }
- err = SSLGetSupportedCiphers(ssl_ctx, all_ciphers,
- &all_ciphers_count);
- if(err != noErr) {
- Curl_safefree(all_ciphers);
- Curl_safefree(allowed_ciphers);
- return CURLE_SSL_CIPHER;
- }
- for(i = 0UL ; i < all_ciphers_count ; i++) {
#if CURL_BUILD_MAC
- /* There's a known bug in early versions of Mountain Lion where ST's ECC
- ciphers (cipher suite 0xC001 through 0xC032) simply do not work.
- Work around the problem here by disabling those ciphers if we are
- running in an affected version of OS X. */
- if(darwinver_maj == 12 && darwinver_min <= 3 &&
- all_ciphers[i] >= 0xC001 && all_ciphers[i] <= 0xC032) {
- continue;
+ {
+ int maj = 0, min = 0;
+ GetDarwinVersionNumber(&maj, &min);
+ /* There is a known bug in early versions of Mountain Lion where ST's ECC
+ ciphers (cipher suite 0xC001 through 0xC032) simply do not work.
+ Work around the problem here by disabling those ciphers if we are
+ running in an affected version of OS X. */
+ if(maj == 12 && min <= 3) {
+ size_t i = 0, j = 0;
+ for(; i < *len; i++) {
+ if(ciphers[i] >= 0xC001 && ciphers[i] <= 0xC032)
+ continue;
+ ciphers[j++] = ciphers[i];
+ }
+ *len = j;
}
-#endif /* CURL_BUILD_MAC */
- if(is_cipher_suite_strong(all_ciphers[i])) {
- allowed_ciphers[allowed_ciphers_count++] = all_ciphers[i];
+ }
+#endif
+
+ return ciphers;
+failed:
+ *len = 0;
+ Curl_safefree(ciphers);
+ return NULL;
+}
+
+static CURLcode sectransp_set_default_ciphers(struct Curl_easy *data,
+ SSLContextRef ssl_ctx)
+{
+ CURLcode ret = CURLE_SSL_CIPHER;
+ size_t count = 0, i, j;
+ OSStatus err;
+ size_t supported_len;
+ SSLCipherSuite *ciphers = NULL;
+
+ ciphers = sectransp_get_supported_ciphers(ssl_ctx, &supported_len);
+ if(!ciphers) {
+ failf(data, "SSL: Failed to get supported ciphers");
+ goto failed;
+ }
+
+ /* Intersect the ciphers supported by Secure Transport with the default
+ * ciphers, using the order of the former. */
+ for(i = 0; i < supported_len; i++) {
+ for(j = 0; j < DEFAULT_CIPHERS_LEN; j++) {
+ if(default_ciphers[j] == ciphers[i]) {
+ ciphers[count++] = ciphers[i];
+ break;
+ }
}
}
- err = SSLSetEnabledCiphers(ssl_ctx, allowed_ciphers,
- allowed_ciphers_count);
- Curl_safefree(all_ciphers);
- Curl_safefree(allowed_ciphers);
+
+ if(count == 0) {
+ failf(data, "SSL: no supported default ciphers");
+ goto failed;
+ }
+
+ err = SSLSetEnabledCiphers(ssl_ctx, ciphers, count);
if(err != noErr) {
failf(data, "SSL: SSLSetEnabledCiphers() failed: OSStatus %d", err);
- return CURLE_SSL_CIPHER;
+ goto failed;
}
- return CURLE_OK;
+
+ ret = CURLE_OK;
+failed:
+ Curl_safefree(ciphers);
+ return ret;
}
static CURLcode sectransp_set_selected_ciphers(struct Curl_easy *data,
SSLContextRef ssl_ctx,
const char *ciphers)
{
- size_t ciphers_count = 0;
- const char *cipher_start = ciphers;
- OSStatus err = noErr;
- SSLCipherSuite selected_ciphers[NUM_OF_CIPHERS];
+ CURLcode ret = CURLE_SSL_CIPHER;
+ size_t count = 0, i;
+ const char *ptr, *end;
+ OSStatus err;
+ size_t supported_len;
+ SSLCipherSuite *supported = NULL;
+ SSLCipherSuite *selected = NULL;
- if(!ciphers)
- return CURLE_OK;
+ supported = sectransp_get_supported_ciphers(ssl_ctx, &supported_len);
+ if(!supported) {
+ failf(data, "SSL: Failed to get supported ciphers");
+ goto failed;
+ }
- while(sectransp_is_separator(*ciphers)) /* Skip initial separators. */
- ciphers++;
- if(!*ciphers)
- return CURLE_OK;
+ selected = malloc(supported_len * sizeof(SSLCipherSuite));
+ if(!selected) {
+ failf(data, "SSL: Failed to allocate memory");
+ goto failed;
+ }
- cipher_start = ciphers;
- while(*cipher_start && ciphers_count < NUM_OF_CIPHERS) {
- bool cipher_found = FALSE;
- size_t cipher_len = 0;
- const char *cipher_end = NULL;
- bool tls_name = FALSE;
- size_t i;
-
- /* Skip separators */
- while(sectransp_is_separator(*cipher_start))
- cipher_start++;
- if(*cipher_start == '\0') {
- break;
- }
- /* Find last position of a cipher in the ciphers string */
- cipher_end = cipher_start;
- while(*cipher_end != '\0' && !sectransp_is_separator(*cipher_end)) {
- ++cipher_end;
- }
+ for(ptr = ciphers; ptr[0] != '\0' && count < supported_len; ptr = end) {
+ uint16_t id = sectransp_cipher_suite_walk_str(&ptr, &end);
- /* IANA cipher names start with the TLS_ or SSL_ prefix.
- If the 4th symbol of the cipher is '_' we look for a cipher in the
- table by its (TLS) name.
- Otherwise, we try to match cipher by an alias. */
- if(cipher_start[3] == '_') {
- tls_name = TRUE;
+ /* Check if cipher is supported */
+ if(id) {
+ for(i = 0; i < supported_len && supported[i] != id; i++);
+ if(i == supported_len)
+ id = 0;
}
- /* Iterate through the cipher table and look for the cipher, starting
- the cipher number 0x01 because the 0x00 is not the real cipher */
- cipher_len = cipher_end - cipher_start;
- for(i = 1; i < NUM_OF_CIPHERS; ++i) {
- const char *table_cipher_name = NULL;
- if(tls_name) {
- table_cipher_name = ciphertable[i].name;
- }
- else if(ciphertable[i].alias_name) {
- table_cipher_name = ciphertable[i].alias_name;
- }
- else {
- continue;
- }
- /* Compare a part of the string between separators with a cipher name
- in the table and make sure we matched the whole cipher name */
- if(strncmp(cipher_start, table_cipher_name, cipher_len) == 0
- && table_cipher_name[cipher_len] == '\0') {
- selected_ciphers[ciphers_count] = ciphertable[i].num;
- ++ciphers_count;
- cipher_found = TRUE;
- break;
- }
- }
- if(!cipher_found) {
- /* It would be more human-readable if we print the wrong cipher name
- but we don't want to allocate any additional memory and copy the name
- into it, then add it into logs.
- Also, we do not modify an original cipher list string. We just point
- to positions where cipher starts and ends in the cipher list string.
- The message is a bit cryptic and longer than necessary but can be
- understood by humans. */
- failf(data, "SSL: cipher string \"%s\" contains unsupported cipher name"
- " starting position %zd and ending position %zd",
- ciphers,
- cipher_start - ciphers,
- cipher_end - ciphers);
- return CURLE_SSL_CIPHER;
- }
- if(*cipher_end) {
- cipher_start = cipher_end + 1;
+ if(!id) {
+ if(ptr[0] != '\0')
+ infof(data, "SSL: unknown cipher in list: \"%.*s\"", (int) (end - ptr),
+ ptr);
+ continue;
}
- else {
- break;
+
+ /* No duplicates allowed (so selected cannot overflow) */
+ for(i = 0; i < count && selected[i] != id; i++);
+ if(i < count) {
+ infof(data, "SSL: duplicate cipher in list: \"%.*s\"", (int) (end - ptr),
+ ptr);
+ continue;
}
+
+ selected[count++] = id;
+ }
+
+ if(count == 0) {
+ failf(data, "SSL: no supported cipher in list");
+ goto failed;
}
- /* All cipher suites in the list are found. Report to logs as-is */
- infof(data, "SSL: Setting cipher suites list \"%s\"", ciphers);
- err = SSLSetEnabledCiphers(ssl_ctx, selected_ciphers, ciphers_count);
+ err = SSLSetEnabledCiphers(ssl_ctx, selected, count);
if(err != noErr) {
failf(data, "SSL: SSLSetEnabledCiphers() failed: OSStatus %d", err);
- return CURLE_SSL_CIPHER;
+ goto failed;
}
- return CURLE_OK;
+
+ ret = CURLE_OK;
+failed:
+ Curl_safefree(supported);
+ Curl_safefree(selected);
+ return ret;
}
static void sectransp_session_free(void *sessionid, size_t idsize)
{
/* ST, as of iOS 5 and Mountain Lion, has no public method of deleting a
cached session ID inside the Security framework. There is a private
- function that does this, but I don't want to have to explain to you why I
+ function that does this, but I do not want to have to explain to you why I
got your application rejected from the App Store due to the use of a
private API, so the best we can do is free up our own char array that we
created way back in sectransp_connect_step1... */
@@ -1665,6 +1088,7 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
const struct curl_blob *ssl_cert_blob = ssl_config->primary.cert_blob;
char *ciphers;
OSStatus err = noErr;
+ CURLcode result;
#if CURL_BUILD_MAC
int darwinver_maj = 0, darwinver_min = 0;
@@ -1675,23 +1099,23 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
#endif /* CURL_BUILD_MAC */
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- if(SSLCreateContext) { /* use the newer API if available */
+ if(&SSLCreateContext) { /* use the newer API if available */
if(backend->ssl_ctx)
CFRelease(backend->ssl_ctx);
backend->ssl_ctx = SSLCreateContext(NULL, kSSLClientSide, kSSLStreamType);
if(!backend->ssl_ctx) {
- failf(data, "SSL: couldn't create a context");
+ failf(data, "SSL: could not create a context");
return CURLE_OUT_OF_MEMORY;
}
}
else {
- /* The old ST API does not exist under iOS, so don't compile it: */
+ /* The old ST API does not exist under iOS, so do not compile it: */
#if CURL_SUPPORT_MAC_10_8
if(backend->ssl_ctx)
(void)SSLDisposeContext(backend->ssl_ctx);
err = SSLNewContext(false, &(backend->ssl_ctx));
if(err != noErr) {
- failf(data, "SSL: couldn't create a context: OSStatus %d", err);
+ failf(data, "SSL: could not create a context: OSStatus %d", err);
return CURLE_OUT_OF_MEMORY;
}
#endif /* CURL_SUPPORT_MAC_10_8 */
@@ -1701,19 +1125,20 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
(void)SSLDisposeContext(backend->ssl_ctx);
err = SSLNewContext(false, &(backend->ssl_ctx));
if(err != noErr) {
- failf(data, "SSL: couldn't create a context: OSStatus %d", err);
+ failf(data, "SSL: could not create a context: OSStatus %d", err);
return CURLE_OUT_OF_MEMORY;
}
#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
backend->ssl_write_buffered_length = 0UL; /* reset buffered write length */
- /* check to see if we've been told to use an explicit SSL/TLS version */
+ /* check to see if we have been told to use an explicit SSL/TLS version */
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- if(SSLSetProtocolVersionMax) {
+ if(&SSLSetProtocolVersionMax) {
switch(conn_config->version) {
case CURL_SSLVERSION_TLSv1:
(void)SSLSetProtocolVersionMin(backend->ssl_ctx, kTLSProtocol1);
-#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
+#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+ defined(HAVE_BUILTIN_AVAILABLE)
if(__builtin_available(macOS 10.13, iOS 11.0, *)) {
(void)SSLSetProtocolVersionMax(backend->ssl_ctx, kTLSProtocol13);
}
@@ -1723,19 +1148,17 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
#else
(void)SSLSetProtocolVersionMax(backend->ssl_ctx, kTLSProtocol12);
#endif /* (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) &&
- HAVE_BUILTIN_AVAILABLE == 1 */
+ defined(HAVE_BUILTIN_AVAILABLE) */
break;
case CURL_SSLVERSION_DEFAULT:
case CURL_SSLVERSION_TLSv1_0:
case CURL_SSLVERSION_TLSv1_1:
case CURL_SSLVERSION_TLSv1_2:
case CURL_SSLVERSION_TLSv1_3:
- {
- CURLcode result = set_ssl_version_min_max(cf, data);
- if(result != CURLE_OK)
- return result;
- break;
- }
+ result = set_ssl_version_min_max(cf, data);
+ if(result != CURLE_OK)
+ return result;
+ break;
case CURL_SSLVERSION_SSLv3:
case CURL_SSLVERSION_SSLv2:
failf(data, "SSL versions not supported");
@@ -1767,12 +1190,10 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
case CURL_SSLVERSION_TLSv1_1:
case CURL_SSLVERSION_TLSv1_2:
case CURL_SSLVERSION_TLSv1_3:
- {
- CURLcode result = set_ssl_version_min_max(cf, data);
- if(result != CURLE_OK)
- return result;
- break;
- }
+ result = set_ssl_version_min_max(cf, data);
+ if(result != CURLE_OK)
+ return result;
+ break;
case CURL_SSLVERSION_SSLv3:
case CURL_SSLVERSION_SSLv2:
failf(data, "SSL versions not supported");
@@ -1817,7 +1238,8 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
}
#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
-#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
+#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+ defined(HAVE_BUILTIN_AVAILABLE)
if(connssl->alpn) {
if(__builtin_available(macOS 10.13.4, iOS 11, tvOS 11, *)) {
struct alpn_proto_buf proto;
@@ -1886,7 +1308,7 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
err = SecIdentityCopyCertificate(cert_and_key, &cert);
if(err == noErr) {
char *certp;
- CURLcode result = CopyCertSubject(data, cert, &certp);
+ result = CopyCertSubject(data, cert, &certp);
if(!result) {
infof(data, "Client certificate: %s", certp);
free(certp);
@@ -1929,11 +1351,11 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
cert_showfilename_error);
break;
case errSecItemNotFound:
- failf(data, "SSL: Can't find the certificate \"%s\" and its private "
+ failf(data, "SSL: cannot find the certificate \"%s\" and its private "
"key in the Keychain.", cert_showfilename_error);
break;
default:
- failf(data, "SSL: Can't load the certificate \"%s\" and its private "
+ failf(data, "SSL: cannot load the certificate \"%s\" and its private "
"key: OSStatus %d", cert_showfilename_error, err);
break;
}
@@ -1948,7 +1370,7 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
#if CURL_BUILD_MAC_10_6 || CURL_BUILD_IOS
/* Snow Leopard introduced the SSLSetSessionOption() function, but due to
a library bug with the way the kSSLSessionOptionBreakOnServerAuth flag
- works, it doesn't work as expected under Snow Leopard, Lion or
+ works, it does not work as expected under Snow Leopard, Lion or
Mountain Lion.
So we need to call SSLSetEnableCertVerify() on those older cats in order
to disable certificate validation if the user turned that off.
@@ -1962,9 +1384,9 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
Darwin 15.x.x is El Capitan (10.11)
*/
#if CURL_BUILD_MAC
- if(SSLSetSessionOption && darwinver_maj >= 13) {
+ if(&SSLSetSessionOption && darwinver_maj >= 13) {
#else
- if(SSLSetSessionOption) {
+ if(&SSLSetSessionOption) {
#endif /* CURL_BUILD_MAC */
bool break_on_auth = !conn_config->verifypeer ||
ssl_cafile || ssl_cablob;
@@ -2000,7 +1422,7 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
bool is_cert_file = (!is_cert_data) && is_file(ssl_cafile);
if(!(is_cert_file || is_cert_data)) {
- failf(data, "SSL: can't load CA certificate file %s",
+ failf(data, "SSL: cannot load CA certificate file %s",
ssl_cafile ? ssl_cafile : "(blob memory)");
return CURLE_SSL_CACERT_BADFILE;
}
@@ -2031,21 +1453,21 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
ciphers = conn_config->cipher_list;
if(ciphers) {
- err = sectransp_set_selected_ciphers(data, backend->ssl_ctx, ciphers);
+ result = sectransp_set_selected_ciphers(data, backend->ssl_ctx, ciphers);
}
else {
- err = sectransp_set_default_ciphers(data, backend->ssl_ctx);
+ result = sectransp_set_default_ciphers(data, backend->ssl_ctx);
}
- if(err != noErr) {
+ if(result != CURLE_OK) {
failf(data, "SSL: Unable to set ciphers for SSL/TLS handshake. "
- "Error code: %d", err);
+ "Error code: %d", (int)result);
return CURLE_SSL_CIPHER;
}
#if CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7
/* We want to enable 1/n-1 when using a CBC cipher unless the user
- specifically doesn't want us doing that: */
- if(SSLSetSessionOption) {
+ specifically does not want us doing that: */
+ if(&SSLSetSessionOption) {
SSLSetSessionOption(backend->ssl_ctx, kSSLSessionOptionSendOneByteRecord,
!ssl_config->enable_beast);
SSLSetSessionOption(backend->ssl_ctx, kSSLSessionOptionFalseStart,
@@ -2053,8 +1475,8 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
}
#endif /* CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7 */
- /* Check if there's a cached ID we can/should use here! */
- if(ssl_config->primary.sessionid) {
+ /* Check if there is a cached ID we can/should use here! */
+ if(ssl_config->primary.cache_session) {
char *ssl_sessionid;
size_t ssl_sessionid_len;
@@ -2071,10 +1493,9 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
/* Informational message */
infof(data, "SSL reusing session ID");
}
- /* If there isn't one, then let's make one up! This has to be done prior
+ /* If there is not one, then let's make one up! This has to be done prior
to starting the handshake. */
else {
- CURLcode result;
ssl_sessionid =
aprintf("%s:%d:%d:%s:%d",
ssl_cafile ? ssl_cafile : "(blob memory)",
@@ -2089,9 +1510,9 @@ static CURLcode sectransp_connect_step1(struct Curl_cfilter *cf,
return CURLE_SSL_CONNECT_ERROR;
}
- result = Curl_ssl_addsessionid(cf, data, &connssl->peer, ssl_sessionid,
- ssl_sessionid_len,
- sectransp_session_free);
+ result = Curl_ssl_set_sessionid(cf, data, &connssl->peer, ssl_sessionid,
+ ssl_sessionid_len,
+ sectransp_session_free);
Curl_ssl_sessionid_unlock(data);
if(result)
return result;
@@ -2121,7 +1542,7 @@ static long pem_to_der(const char *in, unsigned char **out, size_t *outlen)
char *sep_start, *sep_end, *cert_start, *cert_end;
size_t i, j, err;
size_t len;
- unsigned char *b64;
+ char *b64;
/* Jump through the separators at the beginning of the certificate. */
sep_start = strstr(in, "-----");
@@ -2202,16 +1623,16 @@ static int read_cert(const char *file, unsigned char **out, size_t *outlen)
return 0;
}
-static int append_cert_to_array(struct Curl_easy *data,
- const unsigned char *buf, size_t buflen,
- CFMutableArrayRef array)
+static CURLcode append_cert_to_array(struct Curl_easy *data,
+ const unsigned char *buf, size_t buflen,
+ CFMutableArrayRef array)
{
char *certp;
CURLcode result;
SecCertificateRef cacert;
CFDataRef certdata;
- certdata = CFDataCreate(kCFAllocatorDefault, buf, buflen);
+ certdata = CFDataCreate(kCFAllocatorDefault, buf, (CFIndex)buflen);
if(!certdata) {
failf(data, "SSL: failed to allocate array for CA certificate");
return CURLE_OUT_OF_MEMORY;
@@ -2248,7 +1669,8 @@ static CURLcode verify_cert_buf(struct Curl_cfilter *cf,
const unsigned char *certbuf, size_t buflen,
SSLContextRef ctx)
{
- int n = 0, rc;
+ int n = 0;
+ CURLcode rc;
long res;
unsigned char *der;
size_t derlen, offset = 0;
@@ -2419,7 +1841,7 @@ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data,
/* Result is returned to caller */
CURLcode result = CURLE_SSL_PINNEDPUBKEYNOTMATCH;
- /* if a path wasn't specified, don't pin */
+ /* if a path was not specified, do not pin */
if(!pinnedpubkey)
return CURLE_OK;
@@ -2451,17 +1873,17 @@ static CURLcode pkp_pin_peer_pubkey(struct Curl_easy *data,
#elif SECTRANSP_PINNEDPUBKEY_V2
{
- OSStatus success;
- success = SecItemExport(keyRef, kSecFormatOpenSSL, 0, NULL,
- &publicKeyBits);
- CFRelease(keyRef);
- if(success != errSecSuccess || !publicKeyBits)
- break;
+ OSStatus success;
+ success = SecItemExport(keyRef, kSecFormatOpenSSL, 0, NULL,
+ &publicKeyBits);
+ CFRelease(keyRef);
+ if(success != errSecSuccess || !publicKeyBits)
+ break;
}
#endif /* SECTRANSP_PINNEDPUBKEY_V2 */
- pubkeylen = CFDataGetLength(publicKeyBits);
+ pubkeylen = (size_t)CFDataGetLength(publicKeyBits);
pubkey = (unsigned char *)CFDataGetBytePtr(publicKeyBits);
switch(pubkeylen) {
@@ -2530,24 +1952,23 @@ static CURLcode sectransp_connect_step2(struct Curl_cfilter *cf,
SSLCipherSuite cipher;
SSLProtocol protocol = 0;
- DEBUGASSERT(ssl_connect_2 == connssl->connecting_state
- || ssl_connect_2_reading == connssl->connecting_state
- || ssl_connect_2_writing == connssl->connecting_state);
+ DEBUGASSERT(ssl_connect_2 == connssl->connecting_state);
DEBUGASSERT(backend);
CURL_TRC_CF(data, cf, "connect_step2");
/* Here goes nothing: */
check_handshake:
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
err = SSLHandshake(backend->ssl_ctx);
if(err != noErr) {
switch(err) {
- case errSSLWouldBlock: /* they're not done with us yet */
- connssl->connecting_state = backend->ssl_direction ?
- ssl_connect_2_writing : ssl_connect_2_reading;
+ case errSSLWouldBlock: /* they are not done with us yet */
+ connssl->io_need = backend->ssl_direction ?
+ CURL_SSL_IO_NEED_SEND : CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
- /* The below is errSSLServerAuthCompleted; it's not defined in
+ /* The below is errSSLServerAuthCompleted; it is not defined in
Leopard's headers */
case -9841:
if((conn_config->CAfile || conn_config->ca_info_blob) &&
@@ -2657,8 +2078,8 @@ check_handshake:
"authority");
break;
- /* This error is raised if the server's cert didn't match the server's
- host name: */
+ /* This error is raised if the server's cert did not match the server's
+ hostname: */
case errSSLHostNameMismatch:
failf(data, "SSL certificate peer verification failed, the "
"certificate did not match \"%s\"\n", connssl->peer.dispname);
@@ -2759,7 +2180,8 @@ check_handshake:
return CURLE_SSL_CONNECT_ERROR;
}
else {
- /* we have been connected fine, we're not waiting for anything else. */
+ char cipher_str[64];
+ /* we have been connected fine, we are not waiting for anything else. */
connssl->connecting_state = ssl_connect_3;
#ifdef SECTRANSP_PINNEDPUBKEY
@@ -2777,33 +2199,30 @@ check_handshake:
/* Informational message */
(void)SSLGetNegotiatedCipher(backend->ssl_ctx, &cipher);
(void)SSLGetNegotiatedProtocolVersion(backend->ssl_ctx, &protocol);
+
+ sectransp_cipher_suite_get_str((uint16_t) cipher, cipher_str,
+ sizeof(cipher_str), true);
switch(protocol) {
case kSSLProtocol2:
- infof(data, "SSL 2.0 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "SSL 2.0 connection using %s", cipher_str);
break;
case kSSLProtocol3:
- infof(data, "SSL 3.0 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "SSL 3.0 connection using %s", cipher_str);
break;
case kTLSProtocol1:
- infof(data, "TLS 1.0 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "TLS 1.0 connection using %s", cipher_str);
break;
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
case kTLSProtocol11:
- infof(data, "TLS 1.1 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "TLS 1.1 connection using %s", cipher_str);
break;
case kTLSProtocol12:
- infof(data, "TLS 1.2 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "TLS 1.2 connection using %s", cipher_str);
break;
#endif /* CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS */
#if CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11
case kTLSProtocol13:
- infof(data, "TLS 1.3 connection using %s",
- TLSCipherNameForNumber(cipher));
+ infof(data, "TLS 1.3 connection using %s", cipher_str);
break;
#endif /* CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11 */
default:
@@ -2811,7 +2230,8 @@ check_handshake:
break;
}
-#if(CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
+#if (CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && \
+ defined(HAVE_BUILTIN_AVAILABLE)
if(connssl->alpn) {
if(__builtin_available(macOS 10.13.4, iOS 11, tvOS 11, *)) {
CFArrayRef alpnArr = NULL;
@@ -2839,7 +2259,7 @@ check_handshake:
BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
/* chosenProtocol is a reference to the string within alpnArr
- and doesn't need to be freed separately */
+ and does not need to be freed separately */
if(alpnArr)
CFRelease(alpnArr);
}
@@ -2941,10 +2361,10 @@ static CURLcode collect_server_cert(struct Curl_cfilter *cf,
/* SSLCopyPeerCertificates() is deprecated as of Mountain Lion.
The function SecTrustGetCertificateAtIndex() is officially present
in Lion, but it is unfortunately also present in Snow Leopard as
- private API and doesn't work as expected. So we have to look for
+ private API and does not work as expected. So we have to look for
a different symbol to make sure this code is only executed under
Lion or later. */
- if(SecTrustCopyPublicKey) {
+ if(&SecTrustCopyPublicKey) {
#pragma unused(server_certs)
err = SSLCopyPeerTrust(backend->ssl_ctx, &trust);
/* For some reason, SSLCopyPeerTrust() can return noErr and yet return
@@ -3030,7 +2450,7 @@ sectransp_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
}
if(ssl_connect_1 == connssl->connecting_state) {
- /* Find out how much more time we're allowed */
+ /* Find out how much more time we are allowed */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -3044,9 +2464,7 @@ sectransp_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
return result;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
/* check allowed time left */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -3057,14 +2475,13 @@ sectransp_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(connssl->connecting_state == ssl_connect_2_reading ||
- connssl->connecting_state == ssl_connect_2_writing) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking ? 0 : timeout_ms);
@@ -3094,10 +2511,7 @@ sectransp_connect_common(struct Curl_cfilter *cf, struct Curl_easy *data,
* or epoll() will always have a valid fdset to wait on.
*/
result = sectransp_connect_step2(cf, data);
- if(result || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
return result;
} /* repeat step2 until all transactions are done. */
@@ -3146,6 +2560,92 @@ static CURLcode sectransp_connect(struct Curl_cfilter *cf,
return CURLE_OK;
}
+static ssize_t sectransp_recv(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ char *buf,
+ size_t buffersize,
+ CURLcode *curlcode);
+
+static CURLcode sectransp_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct st_ssl_backend_data *backend =
+ (struct st_ssl_backend_data *)connssl->backend;
+ CURLcode result = CURLE_OK;
+ ssize_t nread;
+ char buf[1024];
+ size_t i;
+
+ DEBUGASSERT(backend);
+ if(!backend->ssl_ctx || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
+
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+
+ if(send_shutdown && !backend->sent_shutdown) {
+ OSStatus err;
+
+ CURL_TRC_CF(data, cf, "shutdown, send close notify");
+ err = SSLClose(backend->ssl_ctx);
+ switch(err) {
+ case noErr:
+ backend->sent_shutdown = TRUE;
+ break;
+ case errSSLWouldBlock:
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ result = CURLE_OK;
+ goto out;
+ default:
+ CURL_TRC_CF(data, cf, "shutdown, error: %d", (int)err);
+ result = CURLE_SEND_ERROR;
+ goto out;
+ }
+ }
+
+ for(i = 0; i < 10; ++i) {
+ if(!backend->sent_shutdown) {
+ nread = sectransp_recv(cf, data, buf, (int)sizeof(buf), &result);
+ }
+ else {
+ /* We would like to read the close notify from the server using
+ * secure transport, however SSLRead() no longer works after we
+ * sent the notify from our side. So, we just read from the
+ * underlying filter and hope it will end. */
+ nread = Curl_conn_cf_recv(cf->next, data, buf, sizeof(buf), &result);
+ }
+ CURL_TRC_CF(data, cf, "shutdown read -> %zd, %d", nread, result);
+ if(nread <= 0)
+ break;
+ }
+
+ if(nread > 0) {
+ /* still data coming in? */
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ }
+ else if(nread == 0) {
+ /* We got the close notify alert and are done. */
+ CURL_TRC_CF(data, cf, "shutdown done");
+ *done = TRUE;
+ }
+ else if(result == CURLE_AGAIN) {
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ result = CURLE_OK;
+ }
+ else {
+ DEBUGASSERT(result);
+ CURL_TRC_CF(data, cf, "shutdown, error: %d", result);
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
+}
+
static void sectransp_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct ssl_connect_data *connssl = cf->ctx;
@@ -3158,9 +2658,8 @@ static void sectransp_close(struct Curl_cfilter *cf, struct Curl_easy *data)
if(backend->ssl_ctx) {
CURL_TRC_CF(data, cf, "close");
- (void)SSLClose(backend->ssl_ctx);
#if CURL_BUILD_MAC_10_8 || CURL_BUILD_IOS
- if(SSLCreateContext)
+ if(&SSLCreateContext)
CFRelease(backend->ssl_ctx);
#if CURL_SUPPORT_MAC_10_8
else
@@ -3173,69 +2672,6 @@ static void sectransp_close(struct Curl_cfilter *cf, struct Curl_easy *data)
}
}
-static int sectransp_shutdown(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct ssl_connect_data *connssl = cf->ctx;
- struct st_ssl_backend_data *backend =
- (struct st_ssl_backend_data *)connssl->backend;
- ssize_t nread;
- int what;
- int rc;
- char buf[120];
- int loop = 10; /* avoid getting stuck */
- CURLcode result;
-
- DEBUGASSERT(backend);
-
- if(!backend->ssl_ctx)
- return 0;
-
-#ifndef CURL_DISABLE_FTP
- if(data->set.ftp_ccc != CURLFTPSSL_CCC_ACTIVE)
- return 0;
-#endif
-
- sectransp_close(cf, data);
-
- rc = 0;
-
- what = SOCKET_READABLE(Curl_conn_cf_get_socket(cf, data),
- SSL_SHUTDOWN_TIMEOUT);
-
- CURL_TRC_CF(data, cf, "shutdown");
- while(loop--) {
- if(what < 0) {
- /* anything that gets here is fatally bad */
- failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
- rc = -1;
- break;
- }
-
- if(!what) { /* timeout */
- failf(data, "SSL shutdown timeout");
- break;
- }
-
- /* Something to read, let's do it and hope that it is the close
- notify alert from the server. No way to SSL_Read now, so use read(). */
-
- nread = Curl_conn_cf_recv(cf->next, data, buf, sizeof(buf), &result);
-
- if(nread < 0) {
- failf(data, "read: %s", curl_easy_strerror(result));
- rc = -1;
- }
-
- if(nread <= 0)
- break;
-
- what = SOCKET_READABLE(Curl_conn_cf_get_socket(cf, data), 0);
- }
-
- return rc;
-}
-
static size_t sectransp_version(char *buffer, size_t size)
{
return msnprintf(buffer, size, "SecureTransport");
@@ -3267,7 +2703,7 @@ static bool sectransp_data_pending(struct Curl_cfilter *cf,
static CURLcode sectransp_random(struct Curl_easy *data UNUSED_PARAM,
unsigned char *entropy, size_t length)
{
- /* arc4random_buf() isn't available on cats older than Lion, so let's
+ /* arc4random_buf() is not available on cats older than Lion, so let's
do this manually for the benefit of the older cats. */
size_t i;
u_int32_t random_number = 0;
@@ -3298,7 +2734,7 @@ static CURLcode sectransp_sha256sum(const unsigned char *tmp, /* input */
static bool sectransp_false_start(void)
{
#if CURL_BUILD_MAC_10_9 || CURL_BUILD_IOS_7
- if(SSLSetSessionOption)
+ if(&SSLSetSessionOption)
return TRUE;
#endif
return FALSE;
@@ -3325,7 +2761,7 @@ static ssize_t sectransp_send(struct Curl_cfilter *cf,
Now, one could interpret that as "written to the socket," but actually,
it returns the amount of data that was written to a buffer internal to
- the SSLContextRef instead. So it's possible for SSLWrite() to return
+ the SSLContextRef instead. So it is possible for SSLWrite() to return
errSSLWouldBlock and a number of bytes "written" because those bytes were
encrypted and written to a buffer, not to the socket.
@@ -3338,7 +2774,7 @@ static ssize_t sectransp_send(struct Curl_cfilter *cf,
err = SSLWrite(backend->ssl_ctx, NULL, 0UL, &processed);
switch(err) {
case noErr:
- /* processed is always going to be 0 because we didn't write to
+ /* processed is always going to be 0 because we did not write to
the buffer, so return how much was written to the socket */
processed = backend->ssl_write_buffered_length;
backend->ssl_write_buffered_length = 0UL;
@@ -3353,7 +2789,7 @@ static ssize_t sectransp_send(struct Curl_cfilter *cf,
}
}
else {
- /* We've got new data to write: */
+ /* We have got new data to write: */
err = SSLWrite(backend->ssl_ctx, mem, len, &processed);
if(err != noErr) {
switch(err) {
@@ -3410,7 +2846,7 @@ again:
*curlcode = CURLE_OK;
return 0;
- /* The below is errSSLPeerAuthCompleted; it's not defined in
+ /* The below is errSSLPeerAuthCompleted; it is not defined in
Leopard's headers */
case -9841:
if((conn_config->CAfile || conn_config->ca_info_blob) &&
@@ -3476,7 +2912,6 @@ const struct Curl_ssl Curl_ssl_sectransp = {
sectransp_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
sectransp_recv, /* recv decrypted data */
sectransp_send, /* send data to encrypt */
};
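
The Secure Transport hunks above drop the private cipher table and use the shared cipher-suite helpers instead: the configured list is walked with Curl_cipher_suite_walk_str() (plus a handful of ST-only legacy names), intersected with what SSLGetSupportedCiphers() reports, and unknown or duplicate entries are now only logged; the handshake fails only when no listed suite is usable. From an application's point of view the cipher list is still supplied through the public CURLOPT_SSL_CIPHER_LIST option. A minimal sketch, assuming the IANA names below are among the suites the local Secure Transport build supports and using a placeholder URL:

#include <curl/curl.h>

int main(void)
{
  CURLcode rc = CURLE_FAILED_INIT;
  CURL *curl;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    /* IANA suite names, colon-separated; names the backend does not
       support are skipped with an informational message by the code
       shown in the patch above */
    curl_easy_setopt(curl, CURLOPT_SSL_CIPHER_LIST,
                     "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:"
                     "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384");
    rc = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return (int)rc;
}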
diff --git a/libs/libcurl/src/vtls/vtls.c b/libs/libcurl/src/vtls/vtls.c
index f4633a5f1c..8e5f8a5491 100644
--- a/libs/libcurl/src/vtls/vtls.c
+++ b/libs/libcurl/src/vtls/vtls.c
@@ -68,6 +68,8 @@
#include "curl_base64.h"
#include "curl_printf.h"
#include "inet_pton.h"
+#include "connect.h"
+#include "select.h"
#include "strdup.h"
/* The last #include files should be: */
@@ -103,7 +105,7 @@ static CURLcode blobdup(struct curl_blob **dest,
DEBUGASSERT(dest);
DEBUGASSERT(!*dest);
if(src) {
- /* only if there's data to dupe! */
+ /* only if there is data to dupe! */
struct curl_blob *d;
d = malloc(sizeof(struct curl_blob) + src->len);
if(!d)
@@ -152,7 +154,7 @@ static const struct alpn_spec *alpn_get_spec(int httpwant, bool use_alpn)
(void)httpwant;
#endif
/* Use the ALPN protocol "http/1.1" for HTTP/1.x.
- Avoid "http/1.0" because some servers don't support it. */
+ Avoid "http/1.0" because some servers do not support it. */
return &ALPN_SPEC_H11;
}
#endif /* USE_SSL */
@@ -166,7 +168,7 @@ void Curl_ssl_easy_config_init(struct Curl_easy *data)
*/
data->set.ssl.primary.verifypeer = TRUE;
data->set.ssl.primary.verifyhost = TRUE;
- data->set.ssl.primary.sessionid = TRUE; /* session ID caching by default */
+ data->set.ssl.primary.cache_session = TRUE; /* caching by default */
#ifndef CURL_DISABLE_PROXY
data->set.proxy_ssl = data->set.ssl;
#endif
@@ -228,7 +230,7 @@ static bool clone_ssl_primary_config(struct ssl_primary_config *source,
dest->verifypeer = source->verifypeer;
dest->verifyhost = source->verifyhost;
dest->verifystatus = source->verifystatus;
- dest->sessionid = source->sessionid;
+ dest->cache_session = source->cache_session;
dest->ssl_options = source->ssl_options;
CLONE_BLOB(cert_blob);
@@ -453,7 +455,7 @@ static bool ssl_prefs_check(struct Curl_easy *data)
}
static struct ssl_connect_data *cf_ctx_new(struct Curl_easy *data,
- const struct alpn_spec *alpn)
+ const struct alpn_spec *alpn)
{
struct ssl_connect_data *ctx;
@@ -529,8 +531,8 @@ void Curl_ssl_sessionid_unlock(struct Curl_easy *data)
}
/*
- * Check if there's a session ID for the given connection in the cache, and if
- * there's one suitable, it is provided. Returns TRUE when no entry matched.
+ * Check if there is a session ID for the given connection in the cache, and if
+ * there is one suitable, it is provided. Returns TRUE when no entry matched.
*/
bool Curl_ssl_getsessionid(struct Curl_cfilter *cf,
struct Curl_easy *data,
@@ -549,9 +551,9 @@ bool Curl_ssl_getsessionid(struct Curl_cfilter *cf,
if(!ssl_config)
return TRUE;
- DEBUGASSERT(ssl_config->primary.sessionid);
+ DEBUGASSERT(ssl_config->primary.cache_session);
- if(!ssl_config->primary.sessionid || !data->state.session)
+ if(!ssl_config->primary.cache_session || !data->state.session)
/* session ID reuse is disabled or the session cache has not been
setup */
return TRUE;
@@ -590,7 +592,7 @@ bool Curl_ssl_getsessionid(struct Curl_cfilter *cf,
}
DEBUGF(infof(data, "%s Session ID in cache for %s %s://%s:%d",
- no_match? "Didn't find": "Found",
+ no_match? "Did not find": "Found",
Curl_ssl_cf_is_proxy(cf) ? "proxy" : "host",
cf->conn->handler->scheme, peer->hostname, peer->port));
return no_match;
@@ -635,18 +637,12 @@ void Curl_ssl_delsessionid(struct Curl_easy *data, void *ssl_sessionid)
}
}
-/*
- * Store session id in the session cache. The ID passed on to this function
- * must already have been extracted and allocated the proper way for the SSL
- * layer. Curl_XXXX_session_free() will be called to free/kill the session ID
- * later on.
- */
-CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const struct ssl_peer *peer,
- void *ssl_sessionid,
- size_t idsize,
- Curl_ssl_sessionid_dtor *sessionid_free_cb)
+CURLcode Curl_ssl_set_sessionid(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const struct ssl_peer *peer,
+ void *ssl_sessionid,
+ size_t idsize,
+ Curl_ssl_sessionid_dtor *sessionid_free_cb)
{
struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
@@ -657,6 +653,8 @@ CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
char *clone_conn_to_host = NULL;
int conn_to_port;
long *general_age;
+ void *old_sessionid;
+ size_t old_size;
CURLcode result = CURLE_OUT_OF_MEMORY;
DEBUGASSERT(ssl_sessionid);
@@ -667,9 +665,20 @@ CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
return CURLE_OK;
}
+ if(!Curl_ssl_getsessionid(cf, data, peer, &old_sessionid, &old_size)) {
+ if((old_size == idsize) &&
+ ((old_sessionid == ssl_sessionid) ||
+ (idsize && !memcmp(old_sessionid, ssl_sessionid, idsize)))) {
+ /* the very same */
+ sessionid_free_cb(ssl_sessionid, idsize);
+ return CURLE_OK;
+ }
+ Curl_ssl_delsessionid(data, old_sessionid);
+ }
+
store = &data->state.session[0];
oldest_age = data->state.session[0].age; /* zero if unused */
- DEBUGASSERT(ssl_config->primary.sessionid);
+ DEBUGASSERT(ssl_config->primary.cache_session);
(void)ssl_config;
clone_host = strdup(peer->hostname);
@@ -687,7 +696,7 @@ CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
else
conn_to_port = -1;
- /* Now we should add the session ID and the host name to the cache, (remove
+ /* Now we should add the session ID and the hostname to the cache, (remove
the oldest if necessary) */
/* If using shared SSL session, lock! */
@@ -722,12 +731,12 @@ CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
store->idsize = idsize;
store->sessionid_free = sessionid_free_cb;
store->age = *general_age; /* set current age */
- /* free it if there's one already present */
+ /* free it if there is one already present */
free(store->name);
free(store->conn_to_host);
- store->name = clone_host; /* clone host name */
+ store->name = clone_host; /* clone hostname */
clone_host = NULL;
- store->conn_to_host = clone_conn_to_host; /* clone connect to host name */
+ store->conn_to_host = clone_conn_to_host; /* clone connect to hostname */
clone_conn_to_host = NULL;
store->conn_to_port = conn_to_port; /* connect to port number */
/* port number */
@@ -753,12 +762,6 @@ out:
return CURLE_OK;
}
-void Curl_free_multi_ssl_backend_data(struct multi_ssl_backend_data *mbackend)
-{
- if(Curl_ssl->free_multi_ssl_backend_data && mbackend)
- Curl_ssl->free_multi_ssl_backend_data(mbackend);
-}
-
void Curl_ssl_close_all(struct Curl_easy *data)
{
/* kill the session ID cache if not shared */
@@ -778,11 +781,12 @@ void Curl_ssl_close_all(struct Curl_easy *data)
void Curl_ssl_adjust_pollset(struct Curl_cfilter *cf, struct Curl_easy *data,
struct easy_pollset *ps)
{
- if(!cf->connected) {
- struct ssl_connect_data *connssl = cf->ctx;
+ struct ssl_connect_data *connssl = cf->ctx;
+
+ if(connssl->io_need) {
curl_socket_t sock = Curl_conn_cf_get_socket(cf->next, data);
if(sock != CURL_SOCKET_BAD) {
- if(connssl->connecting_state == ssl_connect_2_writing) {
+ if(connssl->io_need & CURL_SSL_IO_NEED_SEND) {
Curl_pollset_set_out_only(data, ps, sock);
CURL_TRC_CF(data, cf, "adjust_pollset, POLLOUT fd=%"
CURL_FORMAT_SOCKET_T, sock);
@@ -1000,7 +1004,7 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
(void)data;
#endif
- /* if a path wasn't specified, don't pin */
+ /* if a path was not specified, do not pin */
if(!pinnedpubkey)
return CURLE_OK;
if(!pubkey || !pubkeylen)
@@ -1048,7 +1052,7 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
end_pos = strstr(begin_pos, ";sha256//");
/*
* if there is an end_pos, null terminate,
- * otherwise it'll go to the end of the original string
+ * otherwise it will go to the end of the original string
*/
if(end_pos)
end_pos[0] = '\0';
@@ -1094,7 +1098,7 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
/*
* if the size of our certificate is bigger than the file
- * size then it can't match
+ * size then it cannot match
*/
size = curlx_sotouz((curl_off_t) filesize);
if(pubkeylen > size)
@@ -1112,7 +1116,7 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
if((int) fread(buf, size, 1, fp) != 1)
break;
- /* If the sizes are the same, it can't be base64 encoded, must be der */
+ /* If the sizes are the same, it cannot be base64 encoded, must be der */
if(pubkeylen == size) {
if(!memcmp(pubkey, buf, pubkeylen))
result = CURLE_OK;
@@ -1120,18 +1124,18 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
}
/*
- * Otherwise we will assume it's PEM and try to decode it
+ * Otherwise we will assume it is PEM and try to decode it
* after placing null terminator
*/
buf[size] = '\0';
pem_read = pubkey_pem_to_der((const char *)buf, &pem_ptr, &pem_len);
- /* if it wasn't read successfully, exit */
+ /* if it was not read successfully, exit */
if(pem_read)
break;
/*
- * if the size of our certificate doesn't match the size of
- * the decoded file, they can't be the same, otherwise compare
+ * if the size of our certificate does not match the size of
+ * the decoded file, they cannot be the same, otherwise compare
*/
if(pubkeylen == pem_len && !memcmp(pubkey, pem_ptr, pubkeylen))
result = CURLE_OK;
@@ -1173,12 +1177,18 @@ int Curl_none_init(void)
void Curl_none_cleanup(void)
{ }
-int Curl_none_shutdown(struct Curl_cfilter *cf UNUSED_PARAM,
- struct Curl_easy *data UNUSED_PARAM)
+CURLcode Curl_none_shutdown(struct Curl_cfilter *cf UNUSED_PARAM,
+ struct Curl_easy *data UNUSED_PARAM,
+ bool send_shutdown UNUSED_PARAM,
+ bool *done)
{
(void)data;
(void)cf;
- return 0;
+ (void)send_shutdown;
+ /* Every SSL backend should have a shutdown implementation. Until we
+ * have implemented that, we put this fake in place. */
+ *done = TRUE;
+ return CURLE_OK;
}
int Curl_none_check_cxn(struct Curl_cfilter *cf, struct Curl_easy *data)
@@ -1339,7 +1349,6 @@ static const struct Curl_ssl Curl_ssl_multi = {
NULL, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
multissl_recv_plain, /* recv decrypted data */
multissl_send_plain, /* send data to encrypt */
};
@@ -1349,8 +1358,6 @@ const struct Curl_ssl *Curl_ssl =
&Curl_ssl_multi;
#elif defined(USE_WOLFSSL)
&Curl_ssl_wolfssl;
-#elif defined(USE_SECTRANSP)
- &Curl_ssl_sectransp;
#elif defined(USE_GNUTLS)
&Curl_ssl_gnutls;
#elif defined(USE_MBEDTLS)
@@ -1359,6 +1366,8 @@ const struct Curl_ssl *Curl_ssl =
&Curl_ssl_rustls;
#elif defined(USE_OPENSSL)
&Curl_ssl_openssl;
+#elif defined(USE_SECTRANSP)
+ &Curl_ssl_sectransp;
#elif defined(USE_SCHANNEL)
&Curl_ssl_schannel;
#elif defined(USE_BEARSSL)
@@ -1371,9 +1380,6 @@ static const struct Curl_ssl *available_backends[] = {
#if defined(USE_WOLFSSL)
&Curl_ssl_wolfssl,
#endif
-#if defined(USE_SECTRANSP)
- &Curl_ssl_sectransp,
-#endif
#if defined(USE_GNUTLS)
&Curl_ssl_gnutls,
#endif
@@ -1383,6 +1389,9 @@ static const struct Curl_ssl *available_backends[] = {
#if defined(USE_OPENSSL)
&Curl_ssl_openssl,
#endif
+#if defined(USE_SECTRANSP)
+ &Curl_ssl_sectransp,
+#endif
#if defined(USE_SCHANNEL)
&Curl_ssl_schannel,
#endif
@@ -1564,10 +1573,10 @@ CURLcode Curl_ssl_peer_init(struct ssl_peer *peer, struct Curl_cfilter *cf,
const char *ehostname, *edispname;
int eport;
- /* We need the hostname for SNI negotiation. Once handshaked, this
- * remains the SNI hostname for the TLS connection. But when the
- * connection is reused, the settings in cf->conn might change.
- * So we keep a copy of the hostname we use for SNI.
+ /* We need the hostname for SNI negotiation. Once handshaked, this remains
+ * the SNI hostname for the TLS connection. When the connection is reused,
+ * the settings in cf->conn might change. We keep a copy of the hostname we
+ * use for SNI.
*/
#ifndef CURL_DISABLE_PROXY
if(Curl_ssl_cf_is_proxy(cf)) {
@@ -1751,17 +1760,34 @@ static ssize_t ssl_cf_recv(struct Curl_cfilter *cf,
return nread;
}
-static void ssl_cf_adjust_pollset(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- struct easy_pollset *ps)
+static CURLcode ssl_cf_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool *done)
{
- struct cf_call_data save;
+ CURLcode result = CURLE_OK;
+
+ *done = TRUE;
+ if(!cf->shutdown) {
+ struct cf_call_data save;
- if(!cf->connected) {
CF_DATA_SAVE(save, cf, data);
- Curl_ssl->adjust_pollset(cf, data, ps);
+ result = Curl_ssl->shut_down(cf, data, TRUE, done);
+ CURL_TRC_CF(data, cf, "cf_shutdown -> %d, done=%d", result, *done);
CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
}
+ return result;
+}
+
+static void ssl_cf_adjust_pollset(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct easy_pollset *ps)
+{
+ struct cf_call_data save;
+
+ CF_DATA_SAVE(save, cf, data);
+ Curl_ssl->adjust_pollset(cf, data, ps);
+ CF_DATA_RESTORE(cf, save);
}
static CURLcode ssl_cf_cntrl(struct Curl_cfilter *cf,
@@ -1851,6 +1877,7 @@ struct Curl_cftype Curl_cft_ssl = {
ssl_cf_destroy,
ssl_cf_connect,
ssl_cf_close,
+ ssl_cf_shutdown,
Curl_cf_def_get_host,
ssl_cf_adjust_pollset,
ssl_cf_data_pending,
@@ -1871,6 +1898,7 @@ struct Curl_cftype Curl_cft_ssl_proxy = {
ssl_cf_destroy,
ssl_cf_connect,
ssl_cf_close,
+ ssl_cf_shutdown,
Curl_cf_def_get_host,
ssl_cf_adjust_pollset,
ssl_cf_data_pending,
@@ -1982,10 +2010,10 @@ CURLcode Curl_cf_ssl_proxy_insert_after(struct Curl_cfilter *cf_at,
#endif /* !CURL_DISABLE_PROXY */
-bool Curl_ssl_supports(struct Curl_easy *data, int option)
+bool Curl_ssl_supports(struct Curl_easy *data, unsigned int ssl_option)
{
(void)data;
- return (Curl_ssl->supports & option)? TRUE : FALSE;
+ return (Curl_ssl->supports & ssl_option)? TRUE : FALSE;
}
static struct Curl_cfilter *get_ssl_filter(struct Curl_cfilter *cf)
@@ -2021,19 +2049,77 @@ void *Curl_ssl_get_internals(struct Curl_easy *data, int sockindex,
return result;
}
+static CURLcode vtls_shutdown_blocking(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct cf_call_data save;
+ CURLcode result = CURLE_OK;
+ timediff_t timeout_ms;
+ int what, loop = 10;
+
+ if(cf->shutdown) {
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ CF_DATA_SAVE(save, cf, data);
+
+ *done = FALSE;
+ while(!result && !*done && loop--) {
+ timeout_ms = Curl_shutdown_timeleft(cf->conn, cf->sockindex, NULL);
+
+ if(timeout_ms < 0) {
+ /* no need to continue if time is already up */
+ failf(data, "SSL shutdown timeout");
+ return CURLE_OPERATION_TIMEDOUT;
+ }
+
+ result = Curl_ssl->shut_down(cf, data, send_shutdown, done);
+ if(result ||*done)
+ goto out;
+
+ if(connssl->io_need) {
+ what = Curl_conn_cf_poll(cf, data, timeout_ms);
+ if(what < 0) {
+ /* fatal error */
+ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
+ result = CURLE_RECV_ERROR;
+ goto out;
+ }
+ else if(0 == what) {
+ /* timeout */
+ failf(data, "SSL shutdown timeout");
+ result = CURLE_OPERATION_TIMEDOUT;
+ goto out;
+ }
+ /* socket is readable or writable */
+ }
+ }
+out:
+ CF_DATA_RESTORE(cf, save);
+ cf->shutdown = (result || *done);
+ return result;
+}
+
CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data,
- int sockindex)
+ int sockindex, bool send_shutdown)
{
struct Curl_cfilter *cf, *head;
CURLcode result = CURLE_OK;
- (void)data;
head = data->conn? data->conn->cfilter[sockindex] : NULL;
for(cf = head; cf; cf = cf->next) {
if(cf->cft == &Curl_cft_ssl) {
- if(Curl_ssl->shut_down(cf, data))
+ bool done;
+ CURL_TRC_CF(data, cf, "shutdown and remove SSL, start");
+ Curl_shutdown_start(data, sockindex, NULL);
+ result = vtls_shutdown_blocking(cf, data, send_shutdown, &done);
+ Curl_shutdown_clear(data, sockindex);
+ if(!result && !done) /* blocking failed? */
result = CURLE_SSL_SHUTDOWN_FAILED;
Curl_conn_cf_discard_sub(head, cf, data, FALSE);
+ CURL_TRC_CF(data, cf, "shutdown and remove SSL, done -> %d", result);
break;
}
}
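
In vtls.c the session cache setter is renamed from Curl_ssl_addsessionid() to Curl_ssl_set_sessionid(); it now takes ownership of the passed id in every outcome and replaces a differing entry already cached for the same peer. A minimal sketch of the backend-side calling pattern under the session lock, with the my_-prefixed names standing in for backend-specific session data and its destructor (not part of this patch):

static CURLcode my_cache_session(struct Curl_cfilter *cf,
                                 struct Curl_easy *data,
                                 struct ssl_connect_data *connssl,
                                 void *my_id, size_t my_id_len,
                                 Curl_ssl_sessionid_dtor *my_session_free)
{
  CURLcode result;

  Curl_ssl_sessionid_lock(data);
  /* the cache takes ownership of my_id whether storing succeeds or not;
     an older entry for the same peer is dropped or reused if identical */
  result = Curl_ssl_set_sessionid(cf, data, &connssl->peer,
                                  my_id, my_id_len, my_session_free);
  Curl_ssl_sessionid_unlock(data);
  return result;
}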
diff --git a/libs/libcurl/src/vtls/vtls.h b/libs/libcurl/src/vtls/vtls.h
index 4eedc707e4..c81b048a68 100644
--- a/libs/libcurl/src/vtls/vtls.h
+++ b/libs/libcurl/src/vtls/vtls.h
@@ -38,6 +38,7 @@ struct Curl_ssl_session;
#define SSLSUPP_TLS13_CIPHERSUITES (1<<5) /* supports TLS 1.3 ciphersuites */
#define SSLSUPP_CAINFO_BLOB (1<<6)
#define SSLSUPP_ECH (1<<7)
+#define SSLSUPP_CA_CACHE (1<<8)
#define ALPN_ACCEPTED "ALPN: server accepted "
@@ -52,7 +53,6 @@ struct Curl_ssl_session;
/* Curl_multi SSL backend-specific data; declared differently by each SSL
backend */
-struct multi_ssl_backend_data;
struct Curl_cfilter;
CURLsslset Curl_init_sslset_nolock(curl_sslbackend id, const char *name,
@@ -181,8 +181,6 @@ bool Curl_ssl_cert_status_request(void);
bool Curl_ssl_false_start(struct Curl_easy *data);
-void Curl_free_multi_ssl_backend_data(struct multi_ssl_backend_data *mbackend);
-
#define SSL_SHUTDOWN_TIMEOUT 10000 /* ms */
CURLcode Curl_ssl_cfilter_add(struct Curl_easy *data,
@@ -193,7 +191,7 @@ CURLcode Curl_cf_ssl_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data,
- int sockindex);
+ int sockindex, bool send_shutdown);
#ifndef CURL_DISABLE_PROXY
CURLcode Curl_cf_ssl_proxy_insert_after(struct Curl_cfilter *cf_at,
@@ -205,7 +203,7 @@ CURLcode Curl_cf_ssl_proxy_insert_after(struct Curl_cfilter *cf_at,
* Option is one of the defined SSLSUPP_* values.
* `data` maybe NULL for the features of the default implementation.
*/
-bool Curl_ssl_supports(struct Curl_easy *data, int ssl_option);
+bool Curl_ssl_supports(struct Curl_easy *data, unsigned int ssl_option);
/**
* Get the internal ssl instance (like OpenSSL's SSL*) from the filter
@@ -252,7 +250,7 @@ extern struct Curl_cftype Curl_cft_ssl_proxy;
#define Curl_ssl_get_internals(a,b,c,d) NULL
#define Curl_ssl_supports(a,b) FALSE
#define Curl_ssl_cfilter_add(a,b,c) CURLE_NOT_BUILT_IN
-#define Curl_ssl_cfilter_remove(a,b) CURLE_OK
+#define Curl_ssl_cfilter_remove(a,b,c) CURLE_OK
#define Curl_ssl_cf_get_config(a,b) NULL
#define Curl_ssl_cf_get_primary_config(a) NULL
#endif
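
vtls.h extends Curl_ssl_cfilter_remove() with a send_shutdown flag so callers decide whether a TLS close notify is attempted before the filter is discarded, and the no-SSL stub macro gains a third parameter to match. A hedged sketch of an adapted call site, assuming curl's usual FIRSTSOCKET index for the primary connection socket:

static CURLcode my_remove_tls(struct Curl_easy *data, bool send_shutdown)
{
  /* the third argument is new: whether to send a close notify first */
  CURLcode result = Curl_ssl_cfilter_remove(data, FIRSTSOCKET, send_shutdown);
  if(result == CURLE_SSL_SHUTDOWN_FAILED)
    infof(data, "TLS shutdown did not complete cleanly");
  return result;
}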
diff --git a/libs/libcurl/src/vtls/vtls_int.h b/libs/libcurl/src/vtls/vtls_int.h
index d9f73f720d..7fc93c0ebe 100644
--- a/libs/libcurl/src/vtls/vtls_int.h
+++ b/libs/libcurl/src/vtls/vtls_int.h
@@ -64,15 +64,34 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
const unsigned char *proto,
size_t proto_len);
+/* enum for the nonblocking SSL connection state machine */
+typedef enum {
+ ssl_connect_1,
+ ssl_connect_2,
+ ssl_connect_3,
+ ssl_connect_done
+} ssl_connect_state;
+
+typedef enum {
+ ssl_connection_none,
+ ssl_connection_negotiating,
+ ssl_connection_complete
+} ssl_connection_state;
+
+#define CURL_SSL_IO_NEED_NONE (0)
+#define CURL_SSL_IO_NEED_RECV (1<<0)
+#define CURL_SSL_IO_NEED_SEND (1<<1)
+
/* Information in each SSL cfilter context: cf->ctx */
struct ssl_connect_data {
- ssl_connection_state state;
- ssl_connect_state connecting_state;
struct ssl_peer peer;
const struct alpn_spec *alpn; /* ALPN to use or NULL for none */
void *backend; /* vtls backend specific props */
struct cf_call_data call_data; /* data handle used in current call */
struct curltime handshake_done; /* time when handshake finished */
+ ssl_connection_state state;
+ ssl_connect_state connecting_state;
+ int io_need; /* TLS signals special SEND/RECV needs */
BIT(use_alpn); /* if ALPN shall be used in handshake */
BIT(peer_closed); /* peer has closed connection */
};
@@ -99,8 +118,8 @@ struct Curl_ssl {
size_t (*version)(char *buffer, size_t size);
int (*check_cxn)(struct Curl_cfilter *cf, struct Curl_easy *data);
- int (*shut_down)(struct Curl_cfilter *cf,
- struct Curl_easy *data);
+ CURLcode (*shut_down)(struct Curl_cfilter *cf, struct Curl_easy *data,
+ bool send_shutdown, bool *done);
bool (*data_pending)(struct Curl_cfilter *cf,
const struct Curl_easy *data);
@@ -115,9 +134,8 @@ struct Curl_ssl {
struct Curl_easy *data,
bool *done);
- /* During handshake, adjust the pollset to include the socket
- * for POLLOUT or POLLIN as needed.
- * Mandatory. */
+ /* During handshake/shutdown, adjust the pollset to include the socket
+ * for POLLOUT or POLLIN as needed. Mandatory. */
void (*adjust_pollset)(struct Curl_cfilter *cf, struct Curl_easy *data,
struct easy_pollset *ps);
void *(*get_internals)(struct ssl_connect_data *connssl, CURLINFO info);
@@ -135,8 +153,6 @@ struct Curl_ssl {
bool (*attach_data)(struct Curl_cfilter *cf, struct Curl_easy *data);
void (*detach_data)(struct Curl_cfilter *cf, struct Curl_easy *data);
- void (*free_multi_ssl_backend_data)(struct multi_ssl_backend_data *mbackend);
-
ssize_t (*recv_plain)(struct Curl_cfilter *cf, struct Curl_easy *data,
char *buf, size_t len, CURLcode *code);
ssize_t (*send_plain)(struct Curl_cfilter *cf, struct Curl_easy *data,
@@ -149,7 +165,8 @@ extern const struct Curl_ssl *Curl_ssl;
int Curl_none_init(void);
void Curl_none_cleanup(void);
-int Curl_none_shutdown(struct Curl_cfilter *cf, struct Curl_easy *data);
+CURLcode Curl_none_shutdown(struct Curl_cfilter *cf, struct Curl_easy *data,
+ bool send_shutdown, bool *done);
int Curl_none_check_cxn(struct Curl_cfilter *cf, struct Curl_easy *data);
CURLcode Curl_none_random(struct Curl_easy *data, unsigned char *entropy,
size_t length);
@@ -181,19 +198,22 @@ bool Curl_ssl_getsessionid(struct Curl_cfilter *cf,
const struct ssl_peer *peer,
void **ssl_sessionid,
size_t *idsize); /* set 0 if unknown */
-/* add a new session ID
+
+/* Set a TLS session ID for `peer`. Replaces an existing session ID unless
+ * it is the very same one.
* Sessionid mutex must be locked (see Curl_ssl_sessionid_lock).
+ * Call takes ownership of `ssl_sessionid`, using `sessionid_free_cb`
+ * to deallocate it. Is called in all outcomes, either right away or
+ * later when the session cache is cleaned up.
* Caller must ensure that it has properly shared ownership of this sessionid
* object with cache (e.g. incrementing refcount on success)
- * Call takes ownership of `ssl_sessionid`, using `sessionid_free_cb`
- * to destroy it in case of failure or later removal.
*/
-CURLcode Curl_ssl_addsessionid(struct Curl_cfilter *cf,
- struct Curl_easy *data,
- const struct ssl_peer *peer,
- void *ssl_sessionid,
- size_t idsize,
- Curl_ssl_sessionid_dtor *sessionid_free_cb);
+CURLcode Curl_ssl_set_sessionid(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ const struct ssl_peer *peer,
+ void *sessionid,
+ size_t sessionid_size,
+ Curl_ssl_sessionid_dtor *sessionid_free_cb);
#include "openssl.h" /* OpenSSL versions */
#include "gtls.h" /* GnuTLS versions */
diff --git a/libs/libcurl/src/vtls/wolfssl.c b/libs/libcurl/src/vtls/wolfssl.c
index 53e7f0e895..a305b5f080 100644
--- a/libs/libcurl/src/vtls/wolfssl.c
+++ b/libs/libcurl/src/vtls/wolfssl.c
@@ -99,17 +99,11 @@
#undef USE_BIO_CHAIN
#endif
-struct wolfssl_ssl_backend_data {
- WOLFSSL_CTX *ctx;
- WOLFSSL *handle;
- CURLcode io_result; /* result of last BIO cfilter operation */
-};
-
#ifdef OPENSSL_EXTRA
/*
* Availability note:
* The TLS 1.3 secret callback (wolfSSL_set_tls13_secret_cb) was added in
- * WolfSSL 4.4.0, but requires the -DHAVE_SECRET_CALLBACK build option. If that
+ * wolfSSL 4.4.0, but requires the -DHAVE_SECRET_CALLBACK build option. If that
* option is not set, then TLS 1.3 will not be logged.
* For TLS 1.2 and before, we use wolfSSL_get_keys().
* SSL_get_client_random and wolfSSL_get_keys require OPENSSL_EXTRA
@@ -290,8 +284,8 @@ static int wolfssl_bio_cf_out_write(WOLFSSL_BIO *bio,
{
struct Curl_cfilter *cf = wolfSSL_BIO_get_data(bio);
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
struct Curl_easy *data = CF_DATA_CURRENT(cf);
ssize_t nwritten;
CURLcode result = CURLE_OK;
@@ -311,8 +305,8 @@ static int wolfssl_bio_cf_in_read(WOLFSSL_BIO *bio, char *buf, int blen)
{
struct Curl_cfilter *cf = wolfSSL_BIO_get_data(bio);
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
struct Curl_easy *data = CF_DATA_CURRENT(cf);
ssize_t nread;
CURLcode result = CURLE_OK;
@@ -357,6 +351,252 @@ static void wolfssl_bio_cf_free_methods(void)
#endif /* !USE_BIO_CHAIN */
+static CURLcode populate_x509_store(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ X509_STORE *store,
+ struct wolfssl_ctx *wssl)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ const struct curl_blob *ca_info_blob = conn_config->ca_info_blob;
+ const char * const ssl_cafile =
+ /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */
+ (ca_info_blob ? NULL : conn_config->CAfile);
+ const char * const ssl_capath = conn_config->CApath;
+ struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
+ bool imported_native_ca = false;
+
+#if !defined(NO_FILESYSTEM) && defined(WOLFSSL_SYS_CA_CERTS)
+ /* load native CA certificates */
+ if(ssl_config->native_ca_store) {
+ if(wolfSSL_CTX_load_system_CA_certs(wssl->ctx) != WOLFSSL_SUCCESS) {
+ infof(data, "error importing native CA store, continuing anyway");
+ }
+ else {
+ imported_native_ca = true;
+ infof(data, "successfully imported native CA store");
+ wssl->x509_store_setup = TRUE;
+ }
+ }
+#endif /* !NO_FILESYSTEM */
+
+ /* load certificate blob */
+ if(ca_info_blob) {
+ if(wolfSSL_CTX_load_verify_buffer(wssl->ctx, ca_info_blob->data,
+ (long)ca_info_blob->len,
+ SSL_FILETYPE_PEM) != SSL_SUCCESS) {
+ if(imported_native_ca) {
+ infof(data, "error importing CA certificate blob, continuing anyway");
+ }
+ else {
+ failf(data, "error importing CA certificate blob");
+ return CURLE_SSL_CACERT_BADFILE;
+ }
+ }
+ else {
+ infof(data, "successfully imported CA certificate blob");
+ wssl->x509_store_setup = TRUE;
+ }
+ }
+
+#ifndef NO_FILESYSTEM
+ /* load trusted cacert from file if not blob */
+
+ CURL_TRC_CF(data, cf, "populate_x509_store, path=%s, blob=%d",
+ ssl_cafile? ssl_cafile : "none", !!ca_info_blob);
+ if(!store)
+ return CURLE_OUT_OF_MEMORY;
+
+ if((ssl_cafile || ssl_capath) && (!wssl->x509_store_setup)) {
+ int rc =
+ wolfSSL_CTX_load_verify_locations_ex(wssl->ctx,
+ ssl_cafile,
+ ssl_capath,
+ WOLFSSL_LOAD_FLAG_IGNORE_ERR);
+ if(SSL_SUCCESS != rc) {
+ if(conn_config->verifypeer) {
+ /* Fail if we insist on successfully verifying the server. */
+ failf(data, "error setting certificate verify locations:"
+ " CAfile: %s CApath: %s",
+ ssl_cafile ? ssl_cafile : "none",
+ ssl_capath ? ssl_capath : "none");
+ return CURLE_SSL_CACERT_BADFILE;
+ }
+ else {
+ /* Just continue with a warning if no strict certificate
+ verification is required. */
+ infof(data, "error setting certificate verify locations,"
+ " continuing anyway:");
+ }
+ }
+ else {
+ /* Everything is fine. */
+ infof(data, "successfully set certificate verify locations:");
+ }
+ infof(data, " CAfile: %s", ssl_cafile ? ssl_cafile : "none");
+ infof(data, " CApath: %s", ssl_capath ? ssl_capath : "none");
+ wssl->x509_store_setup = TRUE;
+ }
+#endif
+ (void)store;
+ return CURLE_OK;
+}
+
+/* key to use at `multi->proto_hash` */
+#define MPROTO_WSSL_X509_KEY "tls:wssl:x509:share"
+
+struct wssl_x509_share {
+ char *CAfile; /* CAfile path used to generate X509 store */
+ WOLFSSL_X509_STORE *store; /* cached X509 store or NULL if none */
+ struct curltime time; /* when the cached store was created */
+};
+
+static void wssl_x509_share_free(void *key, size_t key_len, void *p)
+{
+ struct wssl_x509_share *share = p;
+ DEBUGASSERT(key_len == (sizeof(MPROTO_WSSL_X509_KEY)-1));
+ DEBUGASSERT(!memcmp(MPROTO_WSSL_X509_KEY, key, key_len));
+ (void)key;
+ (void)key_len;
+ if(share->store) {
+ wolfSSL_X509_STORE_free(share->store);
+ }
+ free(share->CAfile);
+ free(share);
+}
+
+static bool
+cached_x509_store_expired(const struct Curl_easy *data,
+ const struct wssl_x509_share *mb)
+{
+ const struct ssl_general_config *cfg = &data->set.general_ssl;
+ struct curltime now = Curl_now();
+ timediff_t elapsed_ms = Curl_timediff(now, mb->time);
+ timediff_t timeout_ms = cfg->ca_cache_timeout * (timediff_t)1000;
+
+ if(timeout_ms < 0)
+ return false;
+
+ return elapsed_ms >= timeout_ms;
+}
+
+static bool
+cached_x509_store_different(struct Curl_cfilter *cf,
+ const struct wssl_x509_share *mb)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ if(!mb->CAfile || !conn_config->CAfile)
+ return mb->CAfile != conn_config->CAfile;
+
+ return strcmp(mb->CAfile, conn_config->CAfile);
+}
+
+static X509_STORE *get_cached_x509_store(struct Curl_cfilter *cf,
+ const struct Curl_easy *data)
+{
+ struct Curl_multi *multi = data->multi;
+ struct wssl_x509_share *share;
+ WOLFSSL_X509_STORE *store = NULL;
+
+ DEBUGASSERT(multi);
+ share = multi? Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_WSSL_X509_KEY,
+ sizeof(MPROTO_WSSL_X509_KEY)-1) : NULL;
+ if(share && share->store &&
+ !cached_x509_store_expired(data, share) &&
+ !cached_x509_store_different(cf, share)) {
+ store = share->store;
+ }
+
+ return store;
+}
+
+static void set_cached_x509_store(struct Curl_cfilter *cf,
+ const struct Curl_easy *data,
+ X509_STORE *store)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ struct Curl_multi *multi = data->multi;
+ struct wssl_x509_share *share;
+
+ DEBUGASSERT(multi);
+ if(!multi)
+ return;
+ share = Curl_hash_pick(&multi->proto_hash,
+ (void *)MPROTO_WSSL_X509_KEY,
+ sizeof(MPROTO_WSSL_X509_KEY)-1);
+
+ if(!share) {
+ share = calloc(1, sizeof(*share));
+ if(!share)
+ return;
+ if(!Curl_hash_add2(&multi->proto_hash,
+ (void *)MPROTO_WSSL_X509_KEY,
+ sizeof(MPROTO_WSSL_X509_KEY)-1,
+ share, wssl_x509_share_free)) {
+ free(share);
+ return;
+ }
+ }
+
+ if(wolfSSL_X509_STORE_up_ref(store)) {
+ char *CAfile = NULL;
+
+ if(conn_config->CAfile) {
+ CAfile = strdup(conn_config->CAfile);
+ if(!CAfile) {
+ X509_STORE_free(store);
+ return;
+ }
+ }
+
+ if(share->store) {
+ X509_STORE_free(share->store);
+ free(share->CAfile);
+ }
+
+ share->time = Curl_now();
+ share->store = store;
+ share->CAfile = CAfile;
+ }
+}
+
+CURLcode Curl_wssl_setup_x509_store(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct wolfssl_ctx *wssl)
+{
+ struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
+ struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
+ CURLcode result = CURLE_OK;
+ WOLFSSL_X509_STORE *cached_store;
+ bool cache_criteria_met;
+
+ /* Consider the X509 store cacheable if it comes exclusively from a CAfile,
+ or no source is provided and we are falling back to wolfSSL's built-in
+ default. */
+ cache_criteria_met = (data->set.general_ssl.ca_cache_timeout != 0) &&
+ conn_config->verifypeer &&
+ !conn_config->CApath &&
+ !conn_config->ca_info_blob &&
+ !ssl_config->primary.CRLfile &&
+ !ssl_config->native_ca_store;
+
+ cached_store = get_cached_x509_store(cf, data);
+ if(cached_store && cache_criteria_met
+ && wolfSSL_X509_STORE_up_ref(cached_store)) {
+ wolfSSL_CTX_set_cert_store(wssl->ctx, cached_store);
+ }
+ else {
+ X509_STORE *store = wolfSSL_CTX_get_cert_store(wssl->ctx);
+
+ result = populate_x509_store(cf, data, store, wssl);
+ if(result == CURLE_OK && cache_criteria_met) {
+ set_cached_x509_store(cf, data, store);
+ }
+ }
+
+ return result;
+}
+
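
The store cached under MPROTO_WSSL_X509_KEY is only reused while the CA cache timeout has not expired and the CAfile is unchanged. A minimal application-side sketch of tuning that timeout via CURLOPT_CA_CACHE_TIMEOUT (an existing libcurl option; the URL is just a placeholder):

    #include <curl/curl.h>

    int main(void)
    {
      CURL *curl = curl_easy_init();
      if(curl) {
        /* keep the parsed CA store for 10 minutes; 0 disables the cache,
           -1 keeps it forever (libcurl's default is 24 hours) */
        curl_easy_setopt(curl, CURLOPT_CA_CACHE_TIMEOUT, 600L);
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
        curl_easy_perform(curl);
        curl_easy_cleanup(curl);
      }
      return 0;
    }
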
/*
* This function loads all the client/CA certificates and CRLs. Setup the TLS
* layer and do all necessary magic.
@@ -366,15 +606,10 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
{
char *ciphers, *curves;
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
- const struct curl_blob *ca_info_blob = conn_config->ca_info_blob;
const struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
- const char * const ssl_cafile =
- /* CURLOPT_CAINFO_BLOB overrides CURLOPT_CAINFO */
- (ca_info_blob ? NULL : conn_config->CAfile);
- const char * const ssl_capath = conn_config->CApath;
WOLFSSL_METHOD* req_method = NULL;
#ifdef HAVE_LIBOQS
word16 oqsAlg = 0;
@@ -386,8 +621,6 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
#else
#define use_sni(x) Curl_nop_stmt
#endif
- bool imported_native_ca = false;
- bool imported_ca_info_blob = false;
DEBUGASSERT(backend);
@@ -399,7 +632,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_SSL_CONNECT_ERROR;
}
- /* check to see if we've been told to use an explicit SSL/TLS version */
+ /* check to see if we have been told to use an explicit SSL/TLS version */
switch(conn_config->version) {
case CURL_SSLVERSION_DEFAULT:
case CURL_SSLVERSION_TLSv1:
@@ -426,11 +659,11 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
#ifndef NO_OLD_TLS
req_method = TLSv1_1_client_method();
use_sni(TRUE);
+ break;
#else
failf(data, "wolfSSL does not support TLS 1.1");
return CURLE_NOT_BUILT_IN;
#endif
- break;
case CURL_SSLVERSION_TLSv1_2:
#ifndef WOLFSSL_NO_TLS12
req_method = TLSv1_2_client_method();
@@ -455,7 +688,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
if(!req_method) {
- failf(data, "SSL: couldn't create a method");
+ failf(data, "SSL: could not create a method");
return CURLE_OUT_OF_MEMORY;
}
@@ -464,7 +697,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
backend->ctx = wolfSSL_CTX_new(req_method);
if(!backend->ctx) {
- failf(data, "SSL: couldn't create a context");
+ failf(data, "SSL: could not create a context");
return CURLE_OUT_OF_MEMORY;
}
@@ -485,7 +718,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
&& (wolfSSL_CTX_SetMinVersion(backend->ctx, WOLFSSL_TLSV1_3) != 1)
#endif
) {
- failf(data, "SSL: couldn't set the minimum protocol version");
+ failf(data, "SSL: could not set the minimum protocol version");
return CURLE_SSL_CONNECT_ERROR;
}
#endif
@@ -524,73 +757,10 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
}
-#if !defined(NO_FILESYSTEM) && defined(WOLFSSL_SYS_CA_CERTS)
- /* load native CA certificates */
- if(ssl_config->native_ca_store) {
- if(wolfSSL_CTX_load_system_CA_certs(backend->ctx) != WOLFSSL_SUCCESS) {
- infof(data, "error importing native CA store, continuing anyway");
- }
- else {
- imported_native_ca = true;
- infof(data, "successfully imported native CA store");
- }
- }
-#endif /* !NO_FILESYSTEM */
-
- /* load certificate blob */
- if(ca_info_blob) {
- if(wolfSSL_CTX_load_verify_buffer(backend->ctx, ca_info_blob->data,
- ca_info_blob->len,
- SSL_FILETYPE_PEM) != SSL_SUCCESS) {
- if(imported_native_ca) {
- infof(data, "error importing CA certificate blob, continuing anyway");
- }
- else {
- failf(data, "error importing CA certificate blob");
- return CURLE_SSL_CACERT_BADFILE;
- }
- }
- else {
- imported_ca_info_blob = true;
- infof(data, "successfully imported CA certificate blob");
- }
- }
-
#ifndef NO_FILESYSTEM
- /* load trusted cacert from file if not blob */
- if(ssl_cafile || ssl_capath) {
- int rc =
- wolfSSL_CTX_load_verify_locations_ex(backend->ctx,
- ssl_cafile,
- ssl_capath,
- WOLFSSL_LOAD_FLAG_IGNORE_ERR);
- if(SSL_SUCCESS != rc) {
- if(conn_config->verifypeer && !imported_ca_info_blob &&
- !imported_native_ca) {
- /* Fail if we insist on successfully verifying the server. */
- failf(data, "error setting certificate verify locations:"
- " CAfile: %s CApath: %s",
- ssl_cafile ? ssl_cafile : "none",
- ssl_capath ? ssl_capath : "none");
- return CURLE_SSL_CACERT_BADFILE;
- }
- else {
- /* Just continue with a warning if no strict certificate
- verification is required. */
- infof(data, "error setting certificate verify locations,"
- " continuing anyway:");
- }
- }
- else {
- /* Everything is fine. */
- infof(data, "successfully set certificate verify locations:");
- }
- infof(data, " CAfile: %s", ssl_cafile ? ssl_cafile : "none");
- infof(data, " CApath: %s", ssl_capath ? ssl_capath : "none");
- }
-
/* Load the client certificate, and private key */
- if(ssl_config->primary.clientcert && ssl_config->key) {
+ if(ssl_config->primary.clientcert) {
+ char *key_file = ssl_config->key;
int file_type = do_file_type(ssl_config->cert_type);
if(file_type == WOLFSSL_FILETYPE_PEM) {
@@ -614,8 +784,12 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_BAD_FUNCTION_ARGUMENT;
}
- file_type = do_file_type(ssl_config->key_type);
- if(wolfSSL_CTX_use_PrivateKey_file(backend->ctx, ssl_config->key,
+ if(!key_file)
+ key_file = ssl_config->primary.clientcert;
+ else
+ file_type = do_file_type(ssl_config->key_type);
+
+ if(wolfSSL_CTX_use_PrivateKey_file(backend->ctx, key_file,
file_type) != 1) {
failf(data, "unable to set private key");
return CURLE_SSL_CONNECT_ERROR;
@@ -656,7 +830,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
#ifdef NO_FILESYSTEM
else if(conn_config->verifypeer) {
- failf(data, "SSL: Certificates can't be loaded because wolfSSL was built"
+ failf(data, "SSL: Certificates cannot be loaded because wolfSSL was built"
" with \"no filesystem\". Either disable peer verification"
" (insecure) or if you are building an application with libcurl you"
" can load certificates via CURLOPT_SSL_CTX_FUNCTION.");
@@ -669,7 +843,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
wolfSSL_free(backend->handle);
backend->handle = wolfSSL_new(backend->ctx);
if(!backend->handle) {
- failf(data, "SSL: couldn't create a handle");
+ failf(data, "SSL: could not create a handle");
return CURLE_OUT_OF_MEMORY;
}
@@ -688,7 +862,8 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
result = Curl_alpn_to_proto_str(&proto, connssl->alpn);
if(result ||
- wolfSSL_UseALPN(backend->handle, (char *)proto.data, proto.len,
+ wolfSSL_UseALPN(backend->handle,
+ (char *)proto.data, (unsigned int)proto.len,
WOLFSSL_ALPN_CONTINUE_ON_MISMATCH) != SSL_SUCCESS) {
failf(data, "SSL: failed setting ALPN protocols");
return CURLE_SSL_CONNECT_ERROR;
@@ -715,8 +890,8 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
#endif /* HAVE_SECURE_RENEGOTIATION */
- /* Check if there's a cached ID we can/should use here! */
- if(ssl_config->primary.sessionid) {
+ /* Check if there is a cached ID we can/should use here! */
+ if(ssl_config->primary.cache_session) {
void *ssl_sessionid = NULL;
Curl_ssl_sessionid_lock(data);
@@ -725,7 +900,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
/* we got a session id, use it! */
if(!SSL_set_session(backend->handle, ssl_sessionid)) {
Curl_ssl_delsessionid(data, ssl_sessionid);
- infof(data, "Can't use session ID, going on without");
+ infof(data, "cannot use session ID, going on without");
}
else
infof(data, "SSL reusing session ID");
@@ -738,7 +913,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
int trying_ech_now = 0;
if(data->set.str[STRING_ECH_PUBLIC]) {
- infof(data, "ECH: outername not (yet) supported with WolfSSL");
+ infof(data, "ECH: outername not (yet) supported with wolfSSL");
return CURLE_SSL_CONNECT_ERROR;
}
if(data->set.tls_ech == CURLECH_GREASE) {
@@ -802,7 +977,7 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
if(trying_ech_now
&& SSL_set_min_proto_version(backend->handle, TLS1_3_VERSION) != 1) {
- infof(data, "ECH: Can't force TLSv1.3 [ERROR]");
+ infof(data, "ECH: cannot force TLSv1.3 [ERROR]");
return CURLE_SSL_CONNECT_ERROR;
}
@@ -834,13 +1009,31 @@ wolfssl_connect_step1(struct Curl_cfilter *cf, struct Curl_easy *data)
}
+static char *wolfssl_strerror(unsigned long error, char *buf,
+ unsigned long size)
+{
+ DEBUGASSERT(size);
+ *buf = '\0';
+
+ wolfSSL_ERR_error_string_n(error, buf, size);
+
+ if(!*buf) {
+ const char *msg = error ? "Unknown error" : "No error";
+ strncpy(buf, msg, size - 1);
+ buf[size - 1] = '\0';
+ }
+
+ return buf;
+}
+
+
static CURLcode
wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
{
int ret = -1;
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
struct ssl_primary_config *conn_config = Curl_ssl_cf_get_primary_config(cf);
#ifndef CURL_DISABLE_PROXY
const char * const pinnedpubkey = Curl_ssl_cf_is_proxy(cf)?
@@ -862,6 +1055,22 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_SSL_CONNECT_ERROR;
}
+ if(!backend->x509_store_setup) {
+ /* After having sent off the ClientHello, we prepare the x509
+ * store to verify the incoming certificate from the server */
+ CURLcode result;
+ struct wolfssl_ctx wssl;
+ wssl.ctx = backend->ctx;
+ wssl.handle = backend->handle;
+ wssl.io_result = CURLE_OK;
+ wssl.x509_store_setup = FALSE;
+ result = Curl_wssl_setup_x509_store(cf, data, &wssl);
+ if(result)
+ return result;
+ backend->x509_store_setup = wssl.x509_store_setup;
+ }
+
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
ret = wolfSSL_connect(backend->handle);
#ifdef OPENSSL_EXTRA
@@ -889,15 +1098,14 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
#endif /* OPENSSL_EXTRA */
if(ret != 1) {
- char error_buffer[WOLFSSL_MAX_ERROR_SZ];
- int detail = wolfSSL_get_error(backend->handle, ret);
+ int detail = wolfSSL_get_error(backend->handle, ret);
if(SSL_ERROR_WANT_READ == detail) {
- connssl->connecting_state = ssl_connect_2_reading;
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
return CURLE_OK;
}
else if(SSL_ERROR_WANT_WRITE == detail) {
- connssl->connecting_state = ssl_connect_2_writing;
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
return CURLE_OK;
}
/* There is no easy way to override only the CN matching.
@@ -950,7 +1158,7 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
word32 echConfigsLen = 1000;
int rv = 0;
- /* this currently doesn't produce the retry_configs */
+ /* this currently does not produce the retry_configs */
rv = wolfSSL_GetEchConfigs(backend->handle, echConfigs,
&echConfigsLen);
if(rv != WOLFSSL_SUCCESS) {
@@ -972,8 +1180,10 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
return CURLE_OK;
}
else {
+ char error_buffer[256];
failf(data, "SSL_connect failed with error %d: %s", detail,
- wolfSSL_ERR_error_string(detail, error_buffer));
+ wolfssl_strerror((unsigned long)detail, error_buffer,
+ sizeof(error_buffer)));
return CURLE_SSL_CONNECT_ERROR;
}
}
@@ -1070,31 +1280,23 @@ wolfssl_connect_step3(struct Curl_cfilter *cf, struct Curl_easy *data)
{
CURLcode result = CURLE_OK;
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
const struct ssl_config_data *ssl_config = Curl_ssl_cf_get_config(cf, data);
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
DEBUGASSERT(backend);
- if(ssl_config->primary.sessionid) {
+ if(ssl_config->primary.cache_session) {
/* wolfSSL_get1_session allocates memory that has to be freed. */
WOLFSSL_SESSION *our_ssl_sessionid = wolfSSL_get1_session(backend->handle);
if(our_ssl_sessionid) {
- void *old_ssl_sessionid = NULL;
- bool incache;
Curl_ssl_sessionid_lock(data);
- incache = !(Curl_ssl_getsessionid(cf, data, &connssl->peer,
- &old_ssl_sessionid, NULL));
- if(incache) {
- Curl_ssl_delsessionid(data, old_ssl_sessionid);
- }
-
/* call takes ownership of `our_ssl_sessionid` */
- result = Curl_ssl_addsessionid(cf, data, &connssl->peer,
- our_ssl_sessionid, 0,
- wolfssl_session_free);
+ result = Curl_ssl_set_sessionid(cf, data, &connssl->peer,
+ our_ssl_sessionid, 0,
+ wolfssl_session_free);
Curl_ssl_sessionid_unlock(data);
if(result) {
failf(data, "failed to store ssl session");
@@ -1116,9 +1318,8 @@ static ssize_t wolfssl_send(struct Curl_cfilter *cf,
CURLcode *curlcode)
{
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
- char error_buffer[WOLFSSL_MAX_ERROR_SZ];
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
int memlen = (len > (size_t)INT_MAX) ? INT_MAX : (int)len;
int rc;
@@ -1133,7 +1334,7 @@ static ssize_t wolfssl_send(struct Curl_cfilter *cf,
switch(err) {
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
- /* there's data pending, re-invoke SSL_write() */
+ /* there is data pending, re-invoke SSL_write() */
CURL_TRC_CF(data, cf, "wolfssl_send(len=%zu) -> AGAIN", len);
*curlcode = CURLE_AGAIN;
return -1;
@@ -1144,9 +1345,13 @@ static ssize_t wolfssl_send(struct Curl_cfilter *cf,
return -1;
}
CURL_TRC_CF(data, cf, "wolfssl_send(len=%zu) -> %d, %d", len, rc, err);
- failf(data, "SSL write: %s, errno %d",
- wolfSSL_ERR_error_string(err, error_buffer),
- SOCKERRNO);
+ {
+ char error_buffer[256];
+ failf(data, "SSL write: %s, errno %d",
+ wolfssl_strerror((unsigned long)err, error_buffer,
+ sizeof(error_buffer)),
+ SOCKERRNO);
+ }
*curlcode = CURLE_SEND_ERROR;
return -1;
}
@@ -1155,23 +1360,111 @@ static ssize_t wolfssl_send(struct Curl_cfilter *cf,
return rc;
}
+static CURLcode wolfssl_shutdown(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ bool send_shutdown, bool *done)
+{
+ struct ssl_connect_data *connssl = cf->ctx;
+ struct wolfssl_ctx *wctx = (struct wolfssl_ctx *)connssl->backend;
+ CURLcode result = CURLE_OK;
+ char buf[1024];
+ int nread, err;
+
+ DEBUGASSERT(wctx);
+ if(!wctx->handle || cf->shutdown) {
+ *done = TRUE;
+ goto out;
+ }
+
+ connssl->io_need = CURL_SSL_IO_NEED_NONE;
+ *done = FALSE;
+ if(!(wolfSSL_get_shutdown(wctx->handle) & SSL_SENT_SHUTDOWN)) {
+ /* We have not started the shutdown from our side yet. Check
+ * if the server already sent us one. */
+ ERR_clear_error();
+ nread = wolfSSL_read(wctx->handle, buf, (int)sizeof(buf));
+ err = wolfSSL_get_error(wctx->handle, nread);
+ if(!nread && err == SSL_ERROR_ZERO_RETURN) {
+ bool input_pending;
+ /* Yes, it did. */
+ if(!send_shutdown) {
+ CURL_TRC_CF(data, cf, "SSL shutdown received, not sending");
+ *done = TRUE;
+ goto out;
+ }
+ else if(!cf->next->cft->is_alive(cf->next, data, &input_pending)) {
+ /* Server closed the connection after its close notify. It
+ * seems not interested in seeing our close notify, so do not
+ * send it. We are done. */
+ CURL_TRC_CF(data, cf, "peer closed connection");
+ connssl->peer_closed = TRUE;
+ *done = TRUE;
+ goto out;
+ }
+ }
+ }
+
+ if(send_shutdown && wolfSSL_shutdown(wctx->handle) == 1) {
+ CURL_TRC_CF(data, cf, "SSL shutdown finished");
+ *done = TRUE;
+ goto out;
+ }
+ else {
+ size_t i;
+ /* SSL should now have started the shutdown from our side. Since it
+ * was not complete, we are lacking the close notify from the server. */
+ for(i = 0; i < 10; ++i) {
+ ERR_clear_error();
+ nread = wolfSSL_read(wctx->handle, buf, (int)sizeof(buf));
+ if(nread <= 0)
+ break;
+ }
+ err = wolfSSL_get_error(wctx->handle, nread);
+ switch(err) {
+ case SSL_ERROR_ZERO_RETURN: /* no more data */
+ CURL_TRC_CF(data, cf, "SSL shutdown received");
+ *done = TRUE;
+ break;
+ case SSL_ERROR_NONE: /* just did not get anything */
+ case SSL_ERROR_WANT_READ:
+ /* SSL has sent its close notify and now wants to read the reply
+ * from the server. We are not really interested in that. */
+ CURL_TRC_CF(data, cf, "SSL shutdown sent, want receive");
+ connssl->io_need = CURL_SSL_IO_NEED_RECV;
+ break;
+ case SSL_ERROR_WANT_WRITE:
+ CURL_TRC_CF(data, cf, "SSL shutdown send blocked");
+ connssl->io_need = CURL_SSL_IO_NEED_SEND;
+ break;
+ default: {
+ char error_buffer[256];
+ int detail = wolfSSL_get_error(wctx->handle, err);
+ CURL_TRC_CF(data, cf, "SSL shutdown, error: '%s'(%d)",
+ wolfssl_strerror((unsigned long)err, error_buffer,
+ sizeof(error_buffer)),
+ detail);
+ result = CURLE_RECV_ERROR;
+ break;
+ }
+ }
+ }
+
+out:
+ cf->shutdown = (result || *done);
+ return result;
+}
+
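
wolfssl_shutdown() is meant to be called repeatedly through the backend's shut_down entry until *done becomes TRUE, waiting on the socket in the direction io_need indicates between attempts. A minimal sketch of such a driver loop; wait_readable()/wait_writable() are hypothetical helpers, not libcurl functions, and the real caller in the generic vtls filter code is non-blocking:

    bool done = FALSE;
    CURLcode result = CURLE_OK;
    while(!result && !done) {
      result = wolfssl_shutdown(cf, data, TRUE, &done);
      if(!result && !done) {
        if(connssl->io_need & CURL_SSL_IO_NEED_RECV)
          wait_readable(sockfd);   /* hypothetical poll helper */
        else if(connssl->io_need & CURL_SSL_IO_NEED_SEND)
          wait_writable(sockfd);   /* hypothetical poll helper */
      }
    }
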
static void wolfssl_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
(void) data;
DEBUGASSERT(backend);
if(backend->handle) {
- char buf[32];
- /* Maybe the server has already sent a close notify alert.
- Read it to avoid an RST on the TCP connection. */
- (void)wolfSSL_read(backend->handle, buf, (int)sizeof(buf));
- if(!connssl->peer_closed)
- (void)wolfSSL_shutdown(backend->handle);
wolfSSL_free(backend->handle);
backend->handle = NULL;
}
@@ -1187,9 +1480,8 @@ static ssize_t wolfssl_recv(struct Curl_cfilter *cf,
CURLcode *curlcode)
{
struct ssl_connect_data *connssl = cf->ctx;
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
- char error_buffer[WOLFSSL_MAX_ERROR_SZ];
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
int buffsize = (blen > (size_t)INT_MAX) ? INT_MAX : (int)blen;
int nread;
@@ -1211,7 +1503,7 @@ static ssize_t wolfssl_recv(struct Curl_cfilter *cf,
case SSL_ERROR_NONE:
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
- /* there's data pending, re-invoke wolfSSL_read() */
+ /* there is data pending, re-invoke wolfSSL_read() */
CURL_TRC_CF(data, cf, "wolfssl_recv(len=%zu) -> AGAIN", blen);
*curlcode = CURLE_AGAIN;
return -1;
@@ -1221,8 +1513,13 @@ static ssize_t wolfssl_recv(struct Curl_cfilter *cf,
*curlcode = CURLE_AGAIN;
return -1;
}
- failf(data, "SSL read: %s, errno %d",
- wolfSSL_ERR_error_string(err, error_buffer), SOCKERRNO);
+ {
+ char error_buffer[256];
+ failf(data, "SSL read: %s, errno %d",
+ wolfssl_strerror((unsigned long)err, error_buffer,
+ sizeof(error_buffer)),
+ SOCKERRNO);
+ }
*curlcode = CURLE_RECV_ERROR;
return -1;
}
@@ -1269,43 +1566,18 @@ static bool wolfssl_data_pending(struct Curl_cfilter *cf,
const struct Curl_easy *data)
{
struct ssl_connect_data *ctx = cf->ctx;
- struct wolfssl_ssl_backend_data *backend;
+ struct wolfssl_ctx *backend;
(void)data;
DEBUGASSERT(ctx && ctx->backend);
- backend = (struct wolfssl_ssl_backend_data *)ctx->backend;
+ backend = (struct wolfssl_ctx *)ctx->backend;
if(backend->handle) /* SSL is in use */
return (0 != wolfSSL_pending(backend->handle)) ? TRUE : FALSE;
else
return FALSE;
}
-
-/*
- * This function is called to shut down the SSL layer but keep the
- * socket open (CCC - Clear Command Channel)
- */
-static int wolfssl_shutdown(struct Curl_cfilter *cf,
- struct Curl_easy *data)
-{
- struct ssl_connect_data *ctx = cf->ctx;
- struct wolfssl_ssl_backend_data *backend;
- int retval = 0;
-
- (void)data;
- DEBUGASSERT(ctx && ctx->backend);
-
- backend = (struct wolfssl_ssl_backend_data *)ctx->backend;
- if(backend->handle) {
- wolfSSL_ERR_clear_error();
- wolfSSL_free(backend->handle);
- backend->handle = NULL;
- }
- return retval;
-}
-
-
static CURLcode
wolfssl_connect_common(struct Curl_cfilter *cf,
struct Curl_easy *data,
@@ -1324,7 +1596,7 @@ wolfssl_connect_common(struct Curl_cfilter *cf,
}
if(ssl_connect_1 == connssl->connecting_state) {
- /* Find out how much more time we're allowed */
+ /* Find out how much more time we are allowed */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
@@ -1338,9 +1610,7 @@ wolfssl_connect_common(struct Curl_cfilter *cf,
return result;
}
- while(ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state) {
+ while(ssl_connect_2 == connssl->connecting_state) {
/* check allowed time left */
const timediff_t timeout_ms = Curl_timeleft(data, NULL, TRUE);
@@ -1351,14 +1621,13 @@ wolfssl_connect_common(struct Curl_cfilter *cf,
return CURLE_OPERATION_TIMEDOUT;
}
- /* if ssl is expecting something, check if it's available. */
- if(connssl->connecting_state == ssl_connect_2_reading
- || connssl->connecting_state == ssl_connect_2_writing) {
+ /* if ssl is expecting something, check if it is available. */
+ if(connssl->io_need) {
- curl_socket_t writefd = ssl_connect_2_writing ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
- curl_socket_t readfd = ssl_connect_2_reading ==
- connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
+ curl_socket_t writefd = (connssl->io_need & CURL_SSL_IO_NEED_SEND)?
+ sockfd:CURL_SOCKET_BAD;
+ curl_socket_t readfd = (connssl->io_need & CURL_SSL_IO_NEED_RECV)?
+ sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking?0:timeout_ms);
@@ -1389,10 +1658,7 @@ wolfssl_connect_common(struct Curl_cfilter *cf,
* have a valid fdset to wait on.
*/
result = wolfssl_connect_step2(cf, data);
- if(result || (nonblocking &&
- (ssl_connect_2 == connssl->connecting_state ||
- ssl_connect_2_reading == connssl->connecting_state ||
- ssl_connect_2_writing == connssl->connecting_state)))
+ if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state)))
return result;
} /* repeat step2 until all transactions are done. */
@@ -1472,15 +1738,15 @@ static CURLcode wolfssl_sha256sum(const unsigned char *tmp, /* input */
static void *wolfssl_get_internals(struct ssl_connect_data *connssl,
CURLINFO info UNUSED_PARAM)
{
- struct wolfssl_ssl_backend_data *backend =
- (struct wolfssl_ssl_backend_data *)connssl->backend;
+ struct wolfssl_ctx *backend =
+ (struct wolfssl_ctx *)connssl->backend;
(void)info;
DEBUGASSERT(backend);
return backend->handle;
}
const struct Curl_ssl Curl_ssl_wolfssl = {
- { CURLSSLBACKEND_WOLFSSL, "WolfSSL" }, /* info */
+ { CURLSSLBACKEND_WOLFSSL, "wolfssl" }, /* info */
#ifdef KEEP_PEER_CERT
SSLSUPP_PINNEDPUBKEY |
@@ -1493,9 +1759,10 @@ const struct Curl_ssl Curl_ssl_wolfssl = {
#ifdef USE_ECH
SSLSUPP_ECH |
#endif
- SSLSUPP_SSL_CTX,
+ SSLSUPP_SSL_CTX |
+ SSLSUPP_CA_CACHE,
- sizeof(struct wolfssl_ssl_backend_data),
+ sizeof(struct wolfssl_ctx),
wolfssl_init, /* init */
wolfssl_cleanup, /* cleanup */
@@ -1518,7 +1785,6 @@ const struct Curl_ssl Curl_ssl_wolfssl = {
wolfssl_sha256sum, /* sha256sum */
NULL, /* associate_connection */
NULL, /* disassociate_connection */
- NULL, /* free_multi_ssl_backend_data */
wolfssl_recv, /* recv decrypted data */
wolfssl_send, /* send data to encrypt */
};
diff --git a/libs/libcurl/src/vtls/wolfssl.h b/libs/libcurl/src/vtls/wolfssl.h
index 8633d3375c..fcc1b6a625 100644
--- a/libs/libcurl/src/vtls/wolfssl.h
+++ b/libs/libcurl/src/vtls/wolfssl.h
@@ -26,8 +26,25 @@
#include "curl_setup.h"
#ifdef USE_WOLFSSL
+#include <wolfssl/version.h>
+#include <wolfssl/options.h>
+#include <wolfssl/ssl.h>
+#include <wolfssl/error-ssl.h>
+
+#include "urldata.h"
extern const struct Curl_ssl Curl_ssl_wolfssl;
+struct wolfssl_ctx {
+ WOLFSSL_CTX *ctx;
+ WOLFSSL *handle;
+ CURLcode io_result; /* result of last BIO cfilter operation */
+ BIT(x509_store_setup); /* x509 store has been set up */
+};
+
+CURLcode Curl_wssl_setup_x509_store(struct Curl_cfilter *cf,
+ struct Curl_easy *data,
+ struct wolfssl_ctx *wssl);
+
#endif /* USE_WOLFSSL */
#endif /* HEADER_CURL_WOLFSSL_H */
diff --git a/libs/libcurl/src/vtls/x509asn1.c b/libs/libcurl/src/vtls/x509asn1.c
index 0887b34049..f8b0dd38e5 100644
--- a/libs/libcurl/src/vtls/x509asn1.c
+++ b/libs/libcurl/src/vtls/x509asn1.c
@@ -25,13 +25,15 @@
#include "curl_setup.h"
#if defined(USE_GNUTLS) || defined(USE_WOLFSSL) || \
- defined(USE_SCHANNEL) || defined(USE_SECTRANSP)
+ defined(USE_SCHANNEL) || defined(USE_SECTRANSP) || \
+ defined(USE_MBEDTLS)
#if defined(USE_WOLFSSL) || defined(USE_SCHANNEL)
#define WANT_PARSEX509 /* uses Curl_parseX509() */
#endif
-#if defined(USE_GNUTLS) || defined(USE_SCHANNEL) || defined(USE_SECTRANSP)
+#if defined(USE_GNUTLS) || defined(USE_SCHANNEL) || defined(USE_SECTRANSP) || \
+ defined(USE_MBEDTLS)
#define WANT_EXTRACT_CERTINFO /* uses Curl_extract_certinfo() */
#define WANT_PARSEX509 /* ... uses Curl_parseX509() */
#endif
@@ -110,15 +112,16 @@ struct Curl_OID {
};
/* ASN.1 OIDs. */
-static const char cnOID[] = "2.5.4.3"; /* Common name. */
-static const char sanOID[] = "2.5.29.17"; /* Subject alternative name. */
-
static const struct Curl_OID OIDtable[] = {
{ "1.2.840.10040.4.1", "dsa" },
{ "1.2.840.10040.4.3", "dsa-with-sha1" },
{ "1.2.840.10045.2.1", "ecPublicKey" },
{ "1.2.840.10045.3.0.1", "c2pnb163v1" },
{ "1.2.840.10045.4.1", "ecdsa-with-SHA1" },
+ { "1.2.840.10045.4.3.1", "ecdsa-with-SHA224" },
+ { "1.2.840.10045.4.3.2", "ecdsa-with-SHA256" },
+ { "1.2.840.10045.4.3.3", "ecdsa-with-SHA384" },
+ { "1.2.840.10045.4.3.4", "ecdsa-with-SHA512" },
{ "1.2.840.10046.2.1", "dhpublicnumber" },
{ "1.2.840.113549.1.1.1", "rsaEncryption" },
{ "1.2.840.113549.1.1.2", "md2WithRSAEncryption" },
@@ -132,7 +135,7 @@ static const struct Curl_OID OIDtable[] = {
{ "1.2.840.113549.2.2", "md2" },
{ "1.2.840.113549.2.5", "md5" },
{ "1.3.14.3.2.26", "sha1" },
- { cnOID, "CN" },
+ { "2.5.4.3", "CN" },
{ "2.5.4.4", "SN" },
{ "2.5.4.5", "serialNumber" },
{ "2.5.4.6", "C" },
@@ -153,7 +156,7 @@ static const struct Curl_OID OIDtable[] = {
{ "2.5.4.65", "pseudonym" },
{ "1.2.840.113549.1.9.1", "emailAddress" },
{ "2.5.4.72", "role" },
- { sanOID, "subjectAltName" },
+ { "2.5.29.17", "subjectAltName" },
{ "2.5.29.18", "issuerAltName" },
{ "2.5.29.19", "basicConstraints" },
{ "2.16.840.1.101.3.4.2.4", "sha224" },
@@ -372,7 +375,7 @@ utf8asn1str(struct dynbuf *to, int type, const char *from, const char *end)
else {
while(!result && (from < end)) {
char buf[4]; /* decode buffer */
- int charsize = 1;
+ size_t charsize = 1;
unsigned int wc = 0;
switch(size) {
@@ -390,7 +393,6 @@ utf8asn1str(struct dynbuf *to, int type, const char *from, const char *end)
if(wc >= 0x00000800) {
if(wc >= 0x00010000) {
if(wc >= 0x00200000) {
- free(buf);
/* Invalid char. size for target encoding. */
return CURLE_WEIRD_SERVER_REPLY;
}
@@ -469,7 +471,7 @@ static CURLcode OID2str(struct dynbuf *store,
if(op)
result = Curl_dyn_add(store, op->textoid);
else
- result = CURLE_BAD_FUNCTION_ARGUMENT;
+ result = Curl_dyn_add(store, Curl_dyn_ptr(&buf));
Curl_dyn_free(&buf);
}
}
@@ -598,7 +600,7 @@ static CURLcode ASN1tostr(struct dynbuf *store,
{
CURLcode result = CURLE_BAD_FUNCTION_ARGUMENT;
if(elem->constructed)
- return CURLE_OK; /* No conversion of structured elements. */
+ return result; /* No conversion of structured elements. */
if(!type)
type = elem->tag; /* Type not forced: use element tag as type. */
@@ -692,6 +694,11 @@ static CURLcode encodeDN(struct dynbuf *store, struct Curl_asn1Element *dn)
str = Curl_dyn_ptr(&temp);
+ if(!str) {
+ result = CURLE_BAD_FUNCTION_ARGUMENT;
+ goto error;
+ }
+
/* Encode delimiter.
If attribute has a short uppercase name, delimiter is ", ". */
for(p3 = str; ISUPPER(*p3); p3++)
@@ -959,7 +966,8 @@ static int do_pubkey(struct Curl_easy *data, int certnum,
if(ssl_push_certinfo(data, certnum, "ECC Public Key", q))
return 1;
}
- return do_pubkey_field(data, certnum, "ecPublicKey", pubkey);
+ return do_pubkey_field(data, certnum, "ecPublicKey", pubkey) == CURLE_OK
+ ? 0 : 1;
}
/* Get the public key (single element). */
@@ -1223,6 +1231,8 @@ CURLcode Curl_extract_certinfo(struct Curl_easy *data,
result = ssl_push_certinfo_dyn(data, certnum, "Cert", &out);
done:
+ if(result)
+ failf(data, "Failed extracting certificate chain");
Curl_dyn_free(&out);
return result;
}
diff --git a/libs/libcurl/src/vtls/x509asn1.h b/libs/libcurl/src/vtls/x509asn1.h
index 646aac390a..8d245c2d6d 100644
--- a/libs/libcurl/src/vtls/x509asn1.h
+++ b/libs/libcurl/src/vtls/x509asn1.h
@@ -28,7 +28,8 @@
#include "curl_setup.h"
#if defined(USE_GNUTLS) || defined(USE_WOLFSSL) || \
- defined(USE_SCHANNEL) || defined(USE_SECTRANSP)
+ defined(USE_SCHANNEL) || defined(USE_SECTRANSP) || \
+ defined(USE_MBEDTLS)
#include "cfilters.h"
#include "urldata.h"
diff --git a/libs/libcurl/src/ws.c b/libs/libcurl/src/ws.c
index aad4b96c33..37b1039a51 100644
--- a/libs/libcurl/src/ws.c
+++ b/libs/libcurl/src/ws.c
@@ -102,7 +102,7 @@ static unsigned char ws_frame_flags2op(int flags)
size_t i;
for(i = 0; i < sizeof(WS_FRAMES)/sizeof(WS_FRAMES[0]); ++i) {
if(WS_FRAMES[i].flags & flags)
- return WS_FRAMES[i].proto_opcode;
+ return (unsigned char)WS_FRAMES[i].proto_opcode;
}
return 0;
}
@@ -171,7 +171,7 @@ static CURLcode ws_dec_read_head(struct ws_decoder *dec,
dec->head[0] = *inbuf;
Curl_bufq_skip(inraw, 1);
- dec->frame_flags = ws_frame_op2flags(dec->head[0]);
+ dec->frame_flags = ws_frame_op2flags(dec->head[0]);
if(!dec->frame_flags) {
failf(data, "WS: unknown opcode: %x", dec->head[0]);
ws_dec_reset(dec);
@@ -560,7 +560,7 @@ static ssize_t ws_enc_write_head(struct Curl_easy *data,
return -1;
}
- opcode = ws_frame_flags2op(flags);
+ opcode = ws_frame_flags2op((int)flags);
if(!opcode) {
failf(data, "WS: provided flags not recognized '%x'", flags);
*err = CURLE_SEND_ERROR;
@@ -579,7 +579,7 @@ static ssize_t ws_enc_write_head(struct Curl_easy *data,
enc->contfragment = FALSE;
}
else if(enc->contfragment) {
- /* the previous fragment was not a final one and this isn't either, keep a
+ /* the previous fragment was not a final one and this is not either, keep a
CONT opcode and no FIN bit */
firstbyte |= WSBIT_OPCODE_CONT;
}
diff --git a/libs/libcurl/src/ws.h b/libs/libcurl/src/ws.h
index b0d5e8d1b5..c3b310a8b8 100644
--- a/libs/libcurl/src/ws.h
+++ b/libs/libcurl/src/ws.h
@@ -57,7 +57,7 @@ struct ws_encoder {
curl_off_t payload_len; /* payload length of current frame */
curl_off_t payload_remain; /* remaining payload of current */
unsigned int xori; /* xor index */
- unsigned char mask[4]; /* 32 bit mask for this connection */
+ unsigned char mask[4]; /* 32-bit mask for this connection */
unsigned char firstbyte; /* first byte of frame we encode */
bool contfragment; /* set TRUE if the previous fragment sent was not final */
};